Hi Wei,

> -----Original Message-----
> From: Dai, Wei
> Sent: Wednesday, March 7, 2018 1:06 PM
> To: Lu, Wenzhuo <wenzhuo...@intel.com>; Ananyev, Konstantin 
> <konstantin.anan...@intel.com>
> Cc: dev@dpdk.org; Dai, Wei <wei....@intel.com>
> Subject: [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API
> 
> Ethdev Rx offloads API has changed since:
> commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
> This commit support the new Rx offloads API.
> 
> Signed-off-by: Wei Dai <wei....@intel.com>
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c          |  93 +++++++++--------
>  drivers/net/ixgbe/ixgbe_ipsec.c           |   8 +-
>  drivers/net/ixgbe/ixgbe_rxtx.c            | 163 
> ++++++++++++++++++++++++++----
>  drivers/net/ixgbe/ixgbe_rxtx.h            |   3 +
>  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h |   2 +-
>  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c   |   2 +-
>  6 files changed, 205 insertions(+), 66 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c 
> b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 8bb67ba..9437f05 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -2105,19 +2105,22 @@ ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
>  static int
>  ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>  {
> +     struct rte_eth_rxmode *rxmode;
> +     rxmode = &dev->data->dev_conf.rxmode;
> +
>       if (mask & ETH_VLAN_STRIP_MASK) {
>               ixgbe_vlan_hw_strip_config(dev);
>       }
> 
>       if (mask & ETH_VLAN_FILTER_MASK) {
> -             if (dev->data->dev_conf.rxmode.hw_vlan_filter)
> +             if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
>                       ixgbe_vlan_hw_filter_enable(dev);
>               else
>                       ixgbe_vlan_hw_filter_disable(dev);
>       }
> 
>       if (mask & ETH_VLAN_EXTEND_MASK) {
> -             if (dev->data->dev_conf.rxmode.hw_vlan_extend)
> +             if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
>                       ixgbe_vlan_hw_extend_enable(dev);
>               else
>                       ixgbe_vlan_hw_extend_disable(dev);
> @@ -2332,6 +2335,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
>       struct ixgbe_adapter *adapter =
>               (struct ixgbe_adapter *)dev->data->dev_private;
> +     struct rte_eth_dev_info dev_info;
> +     uint64_t rx_offloads;
>       int ret;
> 
>       PMD_INIT_FUNC_TRACE();
> @@ -2343,6 +2348,15 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
>               return ret;
>       }
> 
> +     ixgbe_dev_info_get(dev, &dev_info);
> +     rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +     if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +             PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +                         "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +                         rx_offloads, dev_info.rx_offload_capa);
> +             return -ENOTSUP;
> +     }
> +
>       /* set flag to update link status after init */
>       intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
> 
> @@ -3632,30 +3646,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct 
> rte_eth_dev_info *dev_info)
>       else
>               dev_info->max_vmdq_pools = ETH_64_POOLS;
>       dev_info->vmdq_queue_num = dev_info->max_rx_queues;
> -     dev_info->rx_offload_capa =
> -             DEV_RX_OFFLOAD_VLAN_STRIP |
> -             DEV_RX_OFFLOAD_IPV4_CKSUM |
> -             DEV_RX_OFFLOAD_UDP_CKSUM  |
> -             DEV_RX_OFFLOAD_TCP_CKSUM  |
> -             DEV_RX_OFFLOAD_CRC_STRIP;
> -
> -     /*
> -      * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> -      * mode.
> -      */
> -     if ((hw->mac.type == ixgbe_mac_82599EB ||
> -          hw->mac.type == ixgbe_mac_X540) &&
> -         !RTE_ETH_DEV_SRIOV(dev).active)
> -             dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
> -
> -     if (hw->mac.type == ixgbe_mac_82599EB ||
> -         hw->mac.type == ixgbe_mac_X540)
> -             dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> -
> -     if (hw->mac.type == ixgbe_mac_X550 ||
> -         hw->mac.type == ixgbe_mac_X550EM_x ||
> -         hw->mac.type == ixgbe_mac_X550EM_a)
> -             dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +     dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +     dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +                                  dev_info->rx_queue_offload_capa);
> 
>       dev_info->tx_offload_capa =
>               DEV_TX_OFFLOAD_VLAN_INSERT |
> @@ -3675,10 +3668,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct 
> rte_eth_dev_info *dev_info)
>               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
> 
>  #ifdef RTE_LIBRTE_SECURITY
> -     if (dev->security_ctx) {
> -             dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +     if (dev->security_ctx)
>               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> -     }
>  #endif
> 
>       dev_info->default_rxconf = (struct rte_eth_rxconf) {
> @@ -3689,6 +3680,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct 
> rte_eth_dev_info *dev_info)
>               },
>               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>               .rx_drop_en = 0,
> +             .offloads = 0,
>       };
> 
>       dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -3781,11 +3773,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>               dev_info->max_vmdq_pools = ETH_16_POOLS;
>       else
>               dev_info->max_vmdq_pools = ETH_64_POOLS;
> -     dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
> -                             DEV_RX_OFFLOAD_IPV4_CKSUM |
> -                             DEV_RX_OFFLOAD_UDP_CKSUM  |
> -                             DEV_RX_OFFLOAD_TCP_CKSUM  |
> -                             DEV_RX_OFFLOAD_CRC_STRIP;
> +     dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
> +     dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
> +                                  dev_info->rx_queue_offload_capa);
>       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
>                               DEV_TX_OFFLOAD_IPV4_CKSUM  |
>                               DEV_TX_OFFLOAD_UDP_CKSUM   |
> @@ -3801,6 +3791,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
>               },
>               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
>               .rx_drop_en = 0,
> +             .offloads = 0,
>       };
> 
>       dev_info->default_txconf = (struct rte_eth_txconf) {
> @@ -4894,10 +4885,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t 
> mtu)
> 
>       /* switch to jumbo mode if needed */
>       if (frame_size > ETHER_MAX_LEN) {
> -             dev->data->dev_conf.rxmode.jumbo_frame = 1;
> +             dev->data->dev_conf.rxmode.offloads |=
> +                     DEV_RX_OFFLOAD_JUMBO_FRAME;
>               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
>       } else {
> -             dev->data->dev_conf.rxmode.jumbo_frame = 0;
> +             dev->data->dev_conf.rxmode.offloads &=
> +                     ~DEV_RX_OFFLOAD_JUMBO_FRAME;
>               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
>       }
>       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
> @@ -4946,23 +4939,34 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
>       struct rte_eth_conf *conf = &dev->data->dev_conf;
>       struct ixgbe_adapter *adapter =
>                       (struct ixgbe_adapter *)dev->data->dev_private;
> +     struct rte_eth_dev_info dev_info;
> +     uint64_t rx_offloads;
> 
>       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
>                    dev->data->port_id);
> 
> +     ixgbevf_dev_info_get(dev, &dev_info);
> +     rx_offloads = dev->data->dev_conf.rxmode.offloads;
> +     if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
> +             PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
> +                         "requested 0x%" PRIx64 " supported 0x%" PRIx64,
> +                         rx_offloads, dev_info.rx_offload_capa);
> +             return -ENOTSUP;
> +     }
> +
>       /*
>        * VF has no ability to enable/disable HW CRC
>        * Keep the persistent behavior the same as Host PF
>        */
>  #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
> -     if (!conf->rxmode.hw_strip_crc) {
> +     if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>               PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
> -             conf->rxmode.hw_strip_crc = 1;
> +             conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
>       }
>  #else
> -     if (conf->rxmode.hw_strip_crc) {
> +     if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
>               PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
> -             conf->rxmode.hw_strip_crc = 0;
> +             conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
>       }
>  #endif
> 
> @@ -5850,6 +5854,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>                          uint16_t queue_idx, uint16_t tx_rate)
>  {
>       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +     struct rte_eth_rxmode *rxmode;
>       uint32_t rf_dec, rf_int;
>       uint32_t bcnrc_val;
>       uint16_t link_speed = dev->data->dev_link.link_speed;
> @@ -5871,14 +5876,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
>               bcnrc_val = 0;
>       }
> 
> +     rxmode = &dev->data->dev_conf.rxmode;
>       /*
>        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
>        * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
>        * set as 0x4.
>        */
> -     if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
> -             (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
> -                             IXGBE_MAX_JUMBO_FRAME_SIZE))
> +     if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
> +         (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
>               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
>                       IXGBE_MMW_SIZE_JUMBO_FRAME);
>       else
> @@ -6225,7 +6230,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t 
> mtu)
>       /* refuse mtu that requires the support of scattered packets when this
>        * feature has not been enabled before.
>        */
> -     if (!rx_conf->enable_scatter &&
> +     if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
>           (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
>            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
>               return -EINVAL;
> diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
> index 176ec0f..29e4728 100644
> --- a/drivers/net/ixgbe/ixgbe_ipsec.c
> +++ b/drivers/net/ixgbe/ixgbe_ipsec.c
> @@ -598,13 +598,15 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>  {
>       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>       uint32_t reg;
> +     uint64_t rx_offloads;
> 
> +     rx_offloads = dev->data->dev_conf.rxmode.offloads;
>       /* sanity checks */
> -     if (dev->data->dev_conf.rxmode.enable_lro) {
> +     if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
>               PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
>               return -1;
>       }
> -     if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
> +     if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
>               PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
>               return -1;
>       }
> @@ -624,7 +626,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
>       reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
>       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
> 
> -     if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
> +     if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
>               IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
>               reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
>               if (reg != 0) {
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 5c45eb4..a5d4822 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -2769,6 +2769,98 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, 
> struct ixgbe_rx_queue *rxq)
>  #endif
>  }
> 
> +static int
> +ixgbe_is_vf(struct rte_eth_dev *dev)
> +{
> +     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +     switch (hw->mac.type) {
> +     case ixgbe_mac_82599_vf:
> +     case ixgbe_mac_X540_vf:
> +     case ixgbe_mac_X550_vf:
> +     case ixgbe_mac_X550EM_x_vf:
> +     case ixgbe_mac_X550EM_a_vf:
> +             return 1;
> +     default:
> +             return 0;
> +     }
> +}
> +
> +uint64_t
> +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
> +{
> +     uint64_t offloads;
> +     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +     offloads = DEV_RX_OFFLOAD_HEADER_SPLIT;

As I can see, in ixgbe all header_split code is enabled only if 
RTE_HEADER_SPLIT_ENABLE is on.
It is off by default and I doubt anyone is really using it these days.
So I think the best thing would be not to advertise 
DEV_RX_OFFLOAD_HEADER_SPLIT for ixgbe at all,
and probably remove related code.
If you'd prefer to keep it, then at least we should set that capability only
under #ifdef RTE_HEADER_SPLIT_ENABLE.
Another thing - it should be per port, not per queue.
Though I think it is better just to remove it completely.

> +     if (hw->mac.type != ixgbe_mac_82598EB)
> +             offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +     return offloads;
> +}
> +
> +uint64_t
> +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
> +{
> +     uint64_t offloads;
> +     struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> +     offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
> +                DEV_RX_OFFLOAD_UDP_CKSUM   |
> +                DEV_RX_OFFLOAD_TCP_CKSUM   |
> +                DEV_RX_OFFLOAD_CRC_STRIP   |
> +                DEV_RX_OFFLOAD_JUMBO_FRAME |
> +                DEV_RX_OFFLOAD_SCATTER;
> +
> +     if (hw->mac.type == ixgbe_mac_82598EB)
> +             offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
> +
> +     if (ixgbe_is_vf(dev) == 0)
> +             offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
> +                          DEV_RX_OFFLOAD_VLAN_EXTEND);
> +
> +     /*
> +      * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
> +      * mode.
> +      */
> +     if ((hw->mac.type == ixgbe_mac_82599EB ||
> +          hw->mac.type == ixgbe_mac_X540) &&
> +         !RTE_ETH_DEV_SRIOV(dev).active)
> +             offloads |= DEV_RX_OFFLOAD_TCP_LRO;
> +
> +     if (hw->mac.type == ixgbe_mac_82599EB ||
> +         hw->mac.type == ixgbe_mac_X540)
> +             offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
> +
> +     if (hw->mac.type == ixgbe_mac_X550 ||
> +         hw->mac.type == ixgbe_mac_X550EM_x ||
> +         hw->mac.type == ixgbe_mac_X550EM_a)
> +             offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
> +
> +#ifdef RTE_LIBRTE_SECURITY

I don't think you need that ifdef here.

> +     if (dev->security_ctx)
> +             offloads |= DEV_RX_OFFLOAD_SECURITY;
> +#endif
> +
> +     return offloads;
> +}
> +
> +static int
> +ixgbe_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
> +{
> +     uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
> +     uint64_t queue_supported = ixgbe_get_rx_queue_offloads(dev);
> +     uint64_t port_supported = ixgbe_get_rx_port_offloads(dev);
> +
> +     if ((requested & (queue_supported | port_supported)) != requested)
> +             return 0;
> +
> +     if ((port_offloads ^ requested) & port_supported)

Could you explain a bit more what you are checking here?
As I can see:
 (port_offloads ^ requested) - that's a diff between already set and newly
requested offloads.
Then you check whether that diff contains any offloads supported at the port level,
and if so you return an error?  
Konstantin

> +             return 0;
> +
> +     return 1;
> +}
> +

Reply via email to