Re: [dpdk-dev] [PATCH v5 03/20] event/sw: add device capabilities function

2017-03-25 Thread Jerin Jacob
On Fri, Mar 24, 2017 at 04:52:58PM +, Harry van Haaren wrote:
> From: Bruce Richardson 
> 
> Add in the info_get function to return details on the queues, flow,
> prioritization capabilities, etc. that this device has.
> 
> Signed-off-by: Bruce Richardson 
> Signed-off-by: Harry van Haaren 


Acked-by: Jerin Jacob 

> ---
>  drivers/event/sw/sw_evdev.c | 23 +++
>  1 file changed, 23 insertions(+)
> 
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 4de9bc1..9d8517a 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -44,6 +44,28 @@
>  #define SCHED_QUANTA_ARG "sched_quanta"
>  #define CREDIT_QUANTA_ARG "credit_quanta"
>  
> +static void
> +sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
> +{
> + RTE_SET_USED(dev);
> +
> + static const struct rte_event_dev_info evdev_sw_info = {
> + .driver_name = SW_PMD_NAME,
> + .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
> + .max_event_queue_flows = SW_QID_NUM_FIDS,
> + .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
> + .max_event_priority_levels = SW_IQS_MAX,
> + .max_event_ports = SW_PORTS_MAX,
> + .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
> + .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
> + .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
> + .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
> + RTE_EVENT_DEV_CAP_EVENT_QOS),
> + };
> +
> + *info = evdev_sw_info;
> +}
> +
>  static int
>  assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
>  {
> @@ -78,6 +100,7 @@ static int
>  sw_probe(const char *name, const char *params)
>  {
>   static const struct rte_eventdev_ops evdev_sw_ops = {
> + .dev_infos_get = sw_info_get,
>   };
>  
>   static const char *const args[] = {
> -- 
> 2.7.4
> 
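
For reference, a minimal application-side sketch (not part of the patch) of how
these capabilities can be read back through the public rte_event_dev_info_get()
API once the PMD is probed; the name lookup and the prints are illustrative only:

#include <stdio.h>
#include <rte_eventdev.h>

static void
print_evdev_caps(const char *name)
{
	struct rte_event_dev_info info;
	int dev_id = rte_event_dev_get_dev_id(name);

	/* bail out if the device does not exist or info_get fails */
	if (dev_id < 0 || rte_event_dev_info_get(dev_id, &info) < 0) {
		printf("eventdev %s not found\n", name);
		return;
	}

	printf("driver: %s\n", info.driver_name);
	printf("max queues: %d, max ports: %d\n",
	       info.max_event_queues, info.max_event_ports);
	printf("max inflight events: %d\n", info.max_num_events);
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
		printf("queue QoS supported\n");
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_QOS)
		printf("event QoS supported\n");
}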


Re: [dpdk-dev] [PATCH 00/38] Remove struct eth_driver

2017-03-25 Thread Jan Blunck
On Thu, Mar 23, 2017 at 4:34 PM, Stephen Hemminger
 wrote:
> On Mon,  6 Mar 2017 10:59:52 +0100
> Jan Blunck  wrote:
>
>> This series is removing the PCI specific struct eth_driver from rte_ether. The
>> PCI drivers are changed to use the newly introduced header-only helpers
>> instead. Although the virtual drivers did not make use of the ethdev's driver
>> field they are converted to use the VDEV specific allocation helpers. The
>> motivation for this change is to properly embed a reference to the generic
>> rte_device in the ethdev.
>>
>> The series is based on:
>>
>> * http://dpdk.org/dev/patchwork/patch/20416/
>> * http://dpdk.org/dev/patchwork/patch/20417/
>> * my "Rework vdev probing to use rte_bus infrastructure" series
>> * http://dpdk.org/dev/patchwork/patch/21058/
>>
>> If requested I can push a tree with all dependent patches.
>>
>
> Could you put a tree with the patches up on github?

https://github.com/jblunck/dpdk/commits/eth_driver/20170306


[dpdk-dev] [PATCH 1/1] net/i40e: check return value of rte_zmalloc

2017-03-25 Thread caihe
Hi Helin,

There is a bug in function i40evf_add_del_all_mac_addr: the return value of the
memory allocation is not checked. Should we fix it as below?

diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 55fd344..37ea7ac 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -2014,6 +2014,11 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
}
 
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+   if (!list) {
+   PMD_DRV_LOG(ERR, "fail to alloc memory, abort execute command %s",
+   add ? "OP_ADD_ETHER_ADDRESS" : "OP_DEL_ETHER_ADDRESS");
+   return;
+   }
 
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];

Best Regards



[dpdk-dev] [PATCH 1/1] net/cxgbe: check return value of malloc

2017-03-25 Thread caihe
Hi Rahul,

There is a bug in function t4_wr_mbox_meat_timeout: the return value of the
memory allocation is not checked. Should we fix it as below?

diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index c089b06..7402a5f 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -359,6 +359,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
struct mbox_entry entry;
u32 pcie_fw = 0;
 
+   if (!temp) {
+   return -ENOMEM;
+   }
+
if ((size & 15) || size > MBOX_LEN) {
free(temp);
return -EINVAL;

Best Regards



Re: [dpdk-dev] [PATCH v5 04/20] event/sw: add configure function

2017-03-25 Thread Jerin Jacob
On Fri, Mar 24, 2017 at 04:52:59PM +, Harry van Haaren wrote:
> From: Bruce Richardson 
> 
> Signed-off-by: Bruce Richardson 
> Signed-off-by: Harry van Haaren 
> ---
>  drivers/event/sw/sw_evdev.c | 15 +++
>  drivers/event/sw/sw_evdev.h | 11 +++
>  2 files changed, 26 insertions(+)
> 
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 9d8517a..28a2326 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -44,6 +44,20 @@
>  #define SCHED_QUANTA_ARG "sched_quanta"
>  #define CREDIT_QUANTA_ARG "credit_quanta"
>  
> +static int
> +sw_dev_configure(const struct rte_eventdev *dev)
> +{
> + struct sw_evdev *sw = sw_pmd_priv(dev);
> + const struct rte_eventdev_data *data = dev->data;
> + const struct rte_event_dev_config *conf = &data->dev_conf;
> +
> + sw->qid_count = conf->nb_event_queues;
> + sw->port_count = conf->nb_event_ports;
> + sw->nb_events_limit = conf->nb_events_limit;

I think we can add a check here to report, at the configure stage, that
per-dequeue timeout support is unavailable:

if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
	return -ENOTSUP;

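A minimal sketch of how sw_dev_configure() could look with that check folded in
(field names are taken from the patch above; the trailing return 0 is assumed):

static int
sw_dev_configure(const struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	/* the sw PMD has no per-dequeue timeout support, reject it up front */
	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		return -ENOTSUP;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;

	return 0;
}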

With that change,

Acked-by: Jerin Jacob 


Re: [dpdk-dev] [PATCH v5 05/20] event/sw: add fns to return default port/queue config

2017-03-25 Thread Jerin Jacob
On Fri, Mar 24, 2017 at 04:53:00PM +, Harry van Haaren wrote:
> From: Bruce Richardson 
> 
> Signed-off-by: Bruce Richardson 

Acked-by: Jerin Jacob 

> ---
>  drivers/event/sw/sw_evdev.c | 32 
>  1 file changed, 32 insertions(+)
> 
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 28a2326..d1fa3a7 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -44,6 +44,35 @@
>  #define SCHED_QUANTA_ARG "sched_quanta"
>  #define CREDIT_QUANTA_ARG "credit_quanta"
>  
> +static void
> +sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
> +  struct rte_event_queue_conf *conf)
> +{
> + RTE_SET_USED(dev);
> + RTE_SET_USED(queue_id);
> +
> + static const struct rte_event_queue_conf default_conf = {
> + .nb_atomic_flows = 4096,
> + .nb_atomic_order_sequences = 1,
> + .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
> + .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
> + };
> +
> + *conf = default_conf;
> +}
> +
> +static void
> +sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
> +  struct rte_event_port_conf *port_conf)
> +{
> + RTE_SET_USED(dev);
> + RTE_SET_USED(port_id);
> +
> + port_conf->new_event_threshold = 1024;
> + port_conf->dequeue_depth = 16;
> + port_conf->enqueue_depth = 16;
> +}
> +
>  static int
>  sw_dev_configure(const struct rte_eventdev *dev)
>  {
> @@ -116,6 +145,9 @@ sw_probe(const char *name, const char *params)
>   static const struct rte_eventdev_ops evdev_sw_ops = {
>   .dev_configure = sw_dev_configure,
>   .dev_infos_get = sw_info_get,
> +
> + .queue_def_conf = sw_queue_def_conf,
> + .port_def_conf = sw_port_def_conf,
>   };
>  
>   static const char *const args[] = {
> -- 
> 2.7.4
> 
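
For reference, a minimal application-side sketch (not part of the patch) of how
these defaults are typically consumed: fetch the PMD-provided default config
through the public API and pass it on to the setup calls; dev_id, queue_id and
port_id are placeholders:

#include <rte_eventdev.h>

static int
setup_one_queue_and_port(uint8_t dev_id, uint8_t queue_id, uint8_t port_id)
{
	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;
	int ret;

	/* for the sw PMD this returns the atomic-only, 4096-flow default above */
	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;
	ret = rte_event_queue_setup(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	/* likewise the port default: new_event_threshold 1024, depths of 16 */
	ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);
	if (ret < 0)
		return ret;
	return rte_event_port_setup(dev_id, port_id, &pconf);
}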


Re: [dpdk-dev] [PATCH v4 1/5] net/i40e: add pipeline personalization profile processing

2017-03-25 Thread Chilikin, Andrey
Hi Beilei

> -Original Message-
> From: Xing, Beilei
> Sent: Saturday, March 25, 2017 4:04 AM
> To: Chilikin, Andrey ; Wu, Jingjing
> 
> Cc: Zhang, Helin ; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v4 1/5] net/i40e: add pipeline personalization
> profile processing
> 
> Hi Andrey,
> 
> > -Original Message-
> > From: Chilikin, Andrey
> > Sent: Friday, March 24, 2017 10:53 PM
> > To: Xing, Beilei ; Wu, Jingjing
> > 
> > Cc: Zhang, Helin ; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v4 1/5] net/i40e: add pipeline
> > personalization profile processing
> >
> > Hi Beilei,
> >
> > > -Original Message-
> > > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Beilei Xing
> > > Sent: Friday, March 24, 2017 10:19 AM
> > > To: Wu, Jingjing 
> > > Cc: Zhang, Helin ; dev@dpdk.org
> > > Subject: [dpdk-dev] [PATCH v4 1/5] net/i40e: add pipeline
> > > personalization profile processing
> > >
> > > Add support for adding a pipeline personalization profile package.
> > >
> > > Signed-off-by: Beilei Xing 
> > > ---
> > >  app/test-pmd/cmdline.c|   1 +
> > >  drivers/net/i40e/i40e_ethdev.c| 198 ++
> > >  drivers/net/i40e/rte_pmd_i40e.h   |  51 
> > >  drivers/net/i40e/rte_pmd_i40e_version.map |   6 +
> > >  4 files changed, 256 insertions(+)
> > >
> > > diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
> > > index 47f935d..6e0625d 100644
> > > --- a/app/test-pmd/cmdline.c
> > > +++ b/app/test-pmd/cmdline.c
> > > @@ -37,6 +37,7 @@
> > >  #include 
> > >  #include 
> > >  #include 
> > > +#include 
> > >  #include 
> > >  #include 
> > >  #include 
> > > diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> > > index 3702214..bea593f 100644
> > > --- a/drivers/net/i40e/i40e_ethdev.c
> > > +++ b/drivers/net/i40e/i40e_ethdev.c
> > > @@ -11259,3 +11259,201 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port,
> > >
> > >   return 0;
> > >  }
> > > +
> > > +static void
> > > +i40e_generate_profile_info_sec(char *name, struct i40e_ppp_version
> > > *version,
> > > +uint32_t track_id, uint8_t *profile_info_sec,
> > > +bool add)
> > > +{
> > > + struct i40e_profile_section_header *sec = NULL;
> > > + struct i40e_profile_info *pinfo;
> > > +
> > > + sec = (struct i40e_profile_section_header *)profile_info_sec;
> > > + sec->tbl_size = 1;
> > > + sec->data_end = sizeof(struct i40e_profile_section_header) +
> > > + sizeof(struct i40e_profile_info);
> > > + sec->section.type = SECTION_TYPE_INFO;
> > > + sec->section.offset = sizeof(struct i40e_profile_section_header);
> > > + sec->section.size = sizeof(struct i40e_profile_info);
> > > + pinfo = (struct i40e_profile_info *)(profile_info_sec +
> > > +  sec->section.offset);
> > > + pinfo->track_id = track_id;
> > > + memcpy(pinfo->name, name, I40E_PPP_NAME_SIZE);
> > > + memcpy(&pinfo->version, version, sizeof(struct i40e_ppp_version));
> > > + if (add)
> > > + pinfo->op = I40E_PPP_ADD_TRACKID;
> > > + else
> > > + pinfo->op = I40E_PPP_REMOVE_TRACKID;
> > > +}
> > > +
> > > +static enum i40e_status_code
> > > +i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
> > > +{
> > > + enum i40e_status_code status = I40E_SUCCESS;
> > > + struct i40e_profile_section_header *sec;
> > > + uint32_t track_id;
> > > + uint32_t offset = 0, info = 0;
> > > +
> > > + sec = (struct i40e_profile_section_header *)profile_info_sec;
> > > + track_id = ((struct i40e_profile_info *)(profile_info_sec +
> > > +  sec->section.offset))->track_id;
> > > +
> > > + status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
> > > +track_id, &offset, &info, NULL);
> > > + if (status)
> > > + PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
> > > + "offset %d, info %d",
> > > + offset, info);
> > > +
> > > + return status;
> > > +}
> > > +
> > > +#define I40E_PROFILE_INFO_SIZE 48
> > > +#define I40E_MAX_PROFILE_NUM 16
> > > +
> > > +/* Check if the profile info exists */
> > > +static int
> > > +i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
> > > +{
> > > + struct rte_eth_dev *dev = &rte_eth_devices[port];
> > > + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > + uint8_t *buff;
> > > + struct rte_pmd_i40e_profile_list *p_list;
> > > + struct rte_pmd_i40e_profile_info *pinfo, *p;
> > > + uint32_t i;
> > > + int ret;
> > > +
> > > + buff = rte_zmalloc("pinfo_list",
> > > +(I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
> > > +0);
> > > + if (!buff) {
> > > + PMD_DRV_LOG(ERR, "failed to allocate memory");
> > > + return -1;
> > > + }
> > > +
> > > + ret = i40e_aq_get_ppp_list(hw, (void *)buff,
> > > +   (I40E_PROFILE_INFO_SIZE * I40E_MAX