On Mon, Dec 04, 2017 at 06:02:08AM -0800, Xiao Wang wrote:
[...]
> diff --git a/drivers/net/virtio/virtio_rxtx.c 
> b/drivers/net/virtio/virtio_rxtx.c
> index 6a24fde..7313bdd 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -1100,3 +1100,84 @@
>  
>       return nb_tx;
>  }
> +
> +uint16_t
> +virtio_inject_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
> nb_pkts)
> +{
> +     struct virtnet_tx *txvq = tx_queue;
> +     struct virtqueue *vq = txvq->vq;
> +     struct virtio_hw *hw = vq->hw;
> +     uint16_t hdr_size = hw->vtnet_hdr_size;
> +     uint16_t nb_used, nb_tx = 0;
> +
> +     if (unlikely(nb_pkts < 1))
> +             return nb_pkts;
> +
> +     PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
> +     nb_used = VIRTQUEUE_NUSED(vq);
> +
> +     virtio_rmb();
> +     if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> +             virtio_xmit_cleanup(vq, nb_used);
> +
> +     for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> +             struct rte_mbuf *txm = tx_pkts[nb_tx];
> +             int can_push = 0, use_indirect = 0, slots, need;
> +
> +             /* optimize ring usage */
> +             if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> +                                     vtpci_with_feature(hw, 
> VIRTIO_F_VERSION_1)) &&
> +                     rte_mbuf_refcnt_read(txm) == 1 &&
> +                     RTE_MBUF_DIRECT(txm) &&
> +                     txm->nb_segs == 1 &&
> +                     rte_pktmbuf_headroom(txm) >= hdr_size &&
> +                     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
> +                             __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
> +                     can_push = 1;
> +             else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
> +                      txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
> +                     use_indirect = 1;
> +
> +             /* How many main ring entries are needed to this Tx?
> +              * any_layout => number of segments
> +              * indirect   => 1
> +              * default    => number of segments + 1
> +              */
> +             slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
> +             need = slots - vq->vq_free_cnt;
> +
> +             /* Positive value indicates it need free vring descriptors */
> +             if (unlikely(need > 0)) {
> +                     nb_used = VIRTQUEUE_NUSED(vq);
> +                     virtio_rmb();
> +                     need = RTE_MIN(need, (int)nb_used);
> +
> +                     virtio_xmit_cleanup(vq, need);
> +                     need = slots - vq->vq_free_cnt;
> +                     if (unlikely(need > 0)) {
> +                             PMD_TX_LOG(ERR,
> +                                             "No free tx descriptors to 
> transmit");
> +                             break;
> +                     }
> +             }
> +
> +             /* Enqueue Packet buffers */
> +             virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, 
> can_push);
> +
> +             txvq->stats.bytes += txm->pkt_len;
> +             virtio_update_packet_stats(&txvq->stats, txm);
> +     }
> +
> +     txvq->stats.packets += nb_tx;
> +
> +     if (likely(nb_tx)) {
> +             vq_update_avail_idx(vq);
> +
> +             if (unlikely(virtqueue_kick_prepare(vq))) {
> +                     virtqueue_notify(vq);
> +                     PMD_TX_LOG(DEBUG, "Notified backend after xmit");
> +             }
> +     }
> +
> +     return nb_tx;
> +}

Simple Tx makes some special assumptions about, and performs special setup of, the txq.
The current implementation of virtio_inject_pkts() is basically
a mirror of virtio_xmit_pkts(), not of the simple Tx path. So when the
simple Tx function is chosen, calling virtio_inject_pkts() could cause problems.

Best regards,
Tiwei Bie

Reply via email to