Removing the incorrect email addresses from the To and Cc fields. Sorry.

On Mon, Jun 25, 2018 at 04:20:57PM +0200, Olivier Matz wrote:
> The Tx descriptor status api was not behaving as expected. This API is
> used to inspect the content of the descriptors in the Tx ring to
> determine the length of the Tx queue.
> 
> Since the software advances the tail pointer and the hardware advances
> the head pointer, the Tx queue is located before txq->tx_tail in the
> ring. Therefore, a call to rte_eth_tx_descriptor_status(..., offset=20)
> should inspect the 20th descriptor before the tail, not after.
> 
> As before, we still need to take care about only checking descriptors
> that have the RS bit.
> 
> Additionally, we can avoid an access to the ring if offset is greater or
> equal to nb_tx_desc - nb_tx_free.
> 
> Fixes: a2919e13d95e ("net/ixgbe: implement descriptor status API")
> Signed-off-by: Olivier Matz <olivier.m...@6wind.com>
> Signed-off-by: Didier Pallard <didier.pall...@6wind.com>
> ---
> 
> Hi Wei, Hi Qi,
> 
> We also recently found some issues in Tx descriptor status API for ixgbe,
> i40e, e1000, igb. I'm preparing a clean patchset for all of them.
> 
> Here is already the patch for ixgbe, please let me know what you think.
> 
> The API comment of rte_eth_tx_descriptor_status() is incorrect and should
> be fixed too. The reference descriptor (when offset = 0) is not where the
> next packet will be sent, but where the latest packet has been enqueued.
> 
> Regards,
> Olivier
> 
> 
> 
>  drivers/net/ixgbe/ixgbe_rxtx.c | 45 
> +++++++++++++++++++++++++++++++-----------
>  drivers/net/ixgbe/ixgbe_rxtx.h |  1 +
>  2 files changed, 34 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
> index 3e13d26ae..384587cc6 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.c
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.c
> @@ -2606,10 +2606,15 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
>           hw->mac.type == ixgbe_mac_X540_vf ||
>           hw->mac.type == ixgbe_mac_X550_vf ||
>           hw->mac.type == ixgbe_mac_X550EM_x_vf ||
> -         hw->mac.type == ixgbe_mac_X550EM_a_vf)
> +         hw->mac.type == ixgbe_mac_X550EM_a_vf) {
>               txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, 
> IXGBE_VFTDT(queue_idx));
> -     else
> +             txq->tdh_reg_addr = IXGBE_PCI_REG_ADDR(hw,
> +                             IXGBE_VFTDH(queue_idx));
> +     } else {
>               txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, 
> IXGBE_TDT(txq->reg_idx));
> +             txq->tdh_reg_addr = IXGBE_PCI_REG_ADDR(hw,
> +                             IXGBE_TDH(txq->reg_idx));
> +     }
>  
>       txq->tx_ring_phys_addr = tz->iova;
>       txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
> @@ -3140,22 +3145,38 @@ ixgbe_dev_tx_descriptor_status(void *tx_queue, 
> uint16_t offset)
>  {
>       struct ixgbe_tx_queue *txq = tx_queue;
>       volatile uint32_t *status;
> -     uint32_t desc;
> +     int32_t desc, dd;
>  
>       if (unlikely(offset >= txq->nb_tx_desc))
>               return -EINVAL;
> +     if (offset >= txq->nb_tx_desc - txq->nb_tx_free)
> +             return RTE_ETH_TX_DESC_DONE;
> +
> +     desc = txq->tx_tail - offset - 1;
> +     if (desc < 0)
> +             desc += txq->nb_tx_desc;
>  
> -     desc = txq->tx_tail + offset;
> -     /* go to next desc that has the RS bit */
> -     desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
> -             txq->tx_rs_thresh;
> -     if (desc >= txq->nb_tx_desc) {
> -             desc -= txq->nb_tx_desc;
> -             if (desc >= txq->nb_tx_desc)
> -                     desc -= txq->nb_tx_desc;
> +     /* offset is too small, no other way than reading PCI reg */
> +     if (unlikely(offset < txq->tx_rs_thresh)) {
> +             int16_t tx_head, queue_size;
> +             tx_head = ixgbe_read_addr(txq->tdh_reg_addr);
> +             queue_size = txq->tx_tail - tx_head;
> +             if (queue_size < 0)
> +                     queue_size += txq->nb_tx_desc;
> +             return queue_size > offset ? RTE_ETH_TX_DESC_FULL :
> +                     RTE_ETH_TX_DESC_DONE;
>       }
>  
> -     status = &txq->tx_ring[desc].wb.status;
> +     /* index of the dd bit to look at */
> +     dd = (desc / txq->tx_rs_thresh + 1) * txq->tx_rs_thresh - 1;
> +
> +     /* In full featured mode, RS bit is only set in the last descriptor */
> +     /* of a multisegments packet */
> +     if (!((txq->offloads == 0) &&
> +           (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)))
> +             dd = txq->sw_ring[dd].last_id;
> +
> +     status = &txq->tx_ring[dd].wb.status;
>       if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
>               return RTE_ETH_TX_DESC_DONE;
>  
> diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
> index 39378f754..384f6324d 100644
> --- a/drivers/net/ixgbe/ixgbe_rxtx.h
> +++ b/drivers/net/ixgbe/ixgbe_rxtx.h
> @@ -201,6 +201,7 @@ struct ixgbe_tx_queue {
>               struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for 
> vector PMD */
>       };
>       volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
> +     volatile uint32_t   *tdh_reg_addr; /**< Address of TDH register. */
>       uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
>       uint16_t            tx_tail;       /**< current value of TDT reg. */
>       /**< Start freeing TX buffers if there are less free descriptors than
> -- 
> 2.11.0
> 

Reply via email to