Signed-off-by: Daniele Di Proietto <ddiproie...@vmware.com>
---
 lib/netdev-dpdk.c | 139 +++++++++++++++++++++++++++++++-----------------------
 1 file changed, 80 insertions(+), 59 deletions(-)

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index ee811eb..0798a18 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -605,107 +605,128 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct ofpbuf **packets, int *c)
 }
 
 inline static void
-dpdk_queue_pkt(struct netdev_dpdk *dev, int qid,
-               struct rte_mbuf *pkt)
+dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
+                struct rte_mbuf **pkts, int c)
 {
     struct dpdk_tx_queue *txq = &dev->tx_q[qid];
     uint64_t diff_tsc;
     uint64_t cur_tsc;
     uint32_t nb_tx;
 
+    int i = 0;
+
     rte_spinlock_lock(&txq->tx_lock);
-    txq->burst_pkts[txq->count++] = pkt;
-    if (txq->count == MAX_TX_QUEUE_LEN) {
-        goto flush;
-    }
-    cur_tsc = rte_get_timer_cycles();
-    if (txq->count == 1) {
-        txq->tsc = cur_tsc;
-    }
-    diff_tsc = cur_tsc - txq->tsc;
-    if (diff_tsc >= DRAIN_TSC) {
-        goto flush;
-    }
-    rte_spinlock_unlock(&txq->tx_lock);
-    return;
+    while (i < c) {
+        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
+        int tocopy = MIN(freeslots, c-i);
 
-flush:
-    nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
-    if (nb_tx != txq->count) {
-        /* free buffers if we couldn't transmit packets */
-        rte_mempool_put_bulk(dev->dpdk_mp->mp,
-                             (void **) &txq->burst_pkts[nb_tx],
-                             (txq->count - nb_tx));
+        memcpy(&txq->burst_pkts[txq->count], &pkts[i], tocopy * sizeof (struct rte_mbuf *));
+
+        txq->count += tocopy;
+        i += tocopy;
+
+        if (txq->count == MAX_TX_QUEUE_LEN) {
+            goto flush;
+        }
+        cur_tsc = rte_get_timer_cycles();
+        if (txq->count == 1) {
+            txq->tsc = cur_tsc;
+        }
+        diff_tsc = cur_tsc - txq->tsc;
+        if (diff_tsc >= DRAIN_TSC) {
+            goto flush;
+        }
+        continue;
+
+    flush:
+        nb_tx = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts, txq->count);
+        if (nb_tx != txq->count) {
+            /* free buffers if we couldn't transmit packets */
+            rte_mempool_put_bulk(dev->dpdk_mp->mp,
+                                 (void **) &txq->burst_pkts[nb_tx],
+                                 (txq->count - nb_tx));
+        }
+        txq->count = 0;
     }
-    txq->count = 0;
     rte_spinlock_unlock(&txq->tx_lock);
 }
 
 /* Tx function. Transmit packets indefinitely */
 static void
-dpdk_do_tx_copy(struct netdev *netdev, char *buf, int size)
+dpdk_do_tx_copy(struct netdev *netdev, struct ofpbuf ** ofpbufs, int c)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-    struct rte_mbuf *pkt;
+    struct rte_mbuf *pkts[c];
+    int i;
 
-    pkt = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
-    if (!pkt) {
-        ovs_mutex_lock(&dev->mutex);
-        dev->stats.tx_dropped++;
-        ovs_mutex_unlock(&dev->mutex);
-        return;
-    }
+    for (i = 0; i < c; i++) {
+        int size = ofpbuf_size(ofpbufs[i]);
+        if (size > dev->max_packet_len) {
+            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
+                         (int)size , dev->max_packet_len);
+
+            ovs_mutex_lock(&dev->mutex);
+            dev->stats.tx_dropped++;
+            ovs_mutex_unlock(&dev->mutex);
+        }
+
+        pkts[i] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
+
+        if (!pkts[i]) {
+            ovs_mutex_lock(&dev->mutex);
+            dev->stats.tx_dropped++;
+            ovs_mutex_unlock(&dev->mutex);
+            return;
+        }
 
-    /* We have to do a copy for now */
-    memcpy(pkt->pkt.data, buf, size);
+        /* We have to do a copy for now */
+        memcpy(pkts[i]->pkt.data, ofpbuf_data(ofpbufs[i]), size);
 
-    rte_pktmbuf_data_len(pkt) = size;
-    rte_pktmbuf_pkt_len(pkt) = size;
+        rte_pktmbuf_data_len(pkts[i]) = size;
+        rte_pktmbuf_pkt_len(pkts[i]) = size;
+    }
 
-    dpdk_queue_pkt(dev, NON_PMD_THREAD_TX_QUEUE, pkt);
+    dpdk_queue_pkts(dev, NON_PMD_THREAD_TX_QUEUE, pkts, c);
     dpdk_queue_flush(dev, NON_PMD_THREAD_TX_QUEUE);
 }
 
 static int
-netdev_dpdk_send(struct netdev *netdev,
-                 struct ofpbuf *ofpbuf, bool may_steal)
+netdev_dpdk_send_batch(struct netdev *netdev,
+                       struct ofpbuf **ofpbufs, int c, bool may_steal)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
     int ret;
+    int i;
 
-    if (ofpbuf_size(ofpbuf) > dev->max_packet_len) {
-        VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
-                     (int)ofpbuf_size(ofpbuf) , dev->max_packet_len);
-
-        ovs_mutex_lock(&dev->mutex);
-        dev->stats.tx_dropped++;
-        ovs_mutex_unlock(&dev->mutex);
-
-        ret = E2BIG;
-        goto out;
-    }
-
-    if (!may_steal || ofpbuf->source != OFPBUF_DPDK) {
-        dpdk_do_tx_copy(netdev, (char *) ofpbuf_data(ofpbuf), ofpbuf_size(ofpbuf));
+    if (!may_steal || ofpbufs[0]->source != OFPBUF_DPDK) {
+        dpdk_do_tx_copy(netdev, ofpbufs, c);
 
         if (may_steal) {
-            ofpbuf_delete(ofpbuf);
+            for (i = 0; i < c; i++) {
+                ofpbuf_delete(ofpbufs[i]);
+            }
         }
     } else {
        int qid;
 
        qid = rte_lcore_id() % NR_QUEUE;
 
-        dpdk_queue_pkt(dev, qid, (struct rte_mbuf *)ofpbuf);
-
+        /* TODO: warn if any packet bigger than dev->max_packet_len */
+        dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)ofpbufs, c);
     }
     ret = 0;
 
-out:
     return ret;
 }
 
 static int
+netdev_dpdk_send(struct netdev *netdev,
+                 struct ofpbuf *ofpbuf, bool may_steal)
+{
+    return netdev_dpdk_send_batch(netdev, &ofpbuf, 1, may_steal);
+}
+
+static int
 netdev_dpdk_set_etheraddr(struct netdev *netdev,
                           const uint8_t mac[ETH_ADDR_LEN])
 {
@@ -1122,7 +1143,7 @@ static struct netdev_class netdev_dpdk_class = {
     NULL,                       /* get_tunnel_config */
 
     netdev_dpdk_send,           /* send */
-    NULL,                       /* send_batch */
+    netdev_dpdk_send_batch,     /* send_batch */
     NULL,                       /* send_wait */
 
     netdev_dpdk_set_etheraddr,
-- 
2.0.0.rc0
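
For reference, the enqueue-and-flush behaviour that dpdk_queue_pkts() implements can be summarised in a standalone sketch. This is not part of the patch: the DPDK primitives (rte_eth_tx_burst(), rte_mempool_put_bulk(), rte_get_timer_cycles()) and the OVS tx-queue type are replaced by hypothetical demo_* stubs and placeholder constants so it compiles on its own, and the timestamp is simply taken whenever the queue goes from empty to non-empty, which is the intent of the patch's "txq->count == 1" check.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_TX_QUEUE_LEN 64         /* placeholder; real value is in netdev-dpdk.c */
#define DRAIN_TSC        200000ULL  /* placeholder drain threshold, in TSC cycles */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct pkt;                         /* stand-in for struct rte_mbuf */

struct demo_tx_queue {
    struct pkt *burst_pkts[MAX_TX_QUEUE_LEN];
    int count;                      /* packets currently buffered */
    uint64_t tsc;                   /* time the queue last became non-empty */
};

/* Hypothetical stubs for the DPDK primitives used by the patch. */
static uint64_t demo_read_tsc(void) { static uint64_t t; return t += 1000; }
static uint32_t demo_tx_burst(struct pkt **pkts, uint16_t n) { (void) pkts; return n; }
static void demo_free_pkts(struct pkt **pkts, int n) { (void) pkts; (void) n; }

/* Same shape as dpdk_queue_pkts(): buffer up to MAX_TX_QUEUE_LEN packets,
 * flushing whenever the buffer fills up or the oldest buffered packet has
 * waited at least DRAIN_TSC cycles. */
static void
demo_queue_pkts(struct demo_tx_queue *txq, struct pkt **pkts, int c)
{
    int i = 0;

    while (i < c) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, c - i);
        bool was_empty = txq->count == 0;
        uint64_t cur_tsc;
        uint32_t nb_tx;

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct pkt *));
        txq->count += tocopy;
        i += tocopy;

        cur_tsc = demo_read_tsc();
        if (was_empty) {
            txq->tsc = cur_tsc;
        }
        if (txq->count < MAX_TX_QUEUE_LEN && cur_tsc - txq->tsc < DRAIN_TSC) {
            continue;               /* keep buffering */
        }

        /* Flush: hand the burst to the NIC and drop what it did not take. */
        nb_tx = demo_tx_burst(txq->burst_pkts, (uint16_t) txq->count);
        if (nb_tx != (uint32_t) txq->count) {
            demo_free_pkts(&txq->burst_pkts[nb_tx], txq->count - (int) nb_tx);
        }
        txq->count = 0;
    }
}

int
main(void)
{
    struct demo_tx_queue txq = { .count = 0 };
    struct pkt *batch[100] = { 0 };

    /* 100 > MAX_TX_QUEUE_LEN, so the loop copies 64, flushes, then buffers 36. */
    demo_queue_pkts(&txq, batch, 100);
    return 0;
}

The point of the loop is that a batch larger than the remaining room in burst_pkts gets split: each pass copies as many packets as fit, and a flush happens either because the queue filled up or because the drain timeout expired, just as the single-packet version did per call.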
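
The patch only fills the send_batch slot of netdev_dpdk_class and re-expresses netdev_dpdk_send() as a batch of one; the caller-side choice between the two hooks is outside this diff. Purely as an illustration of that split, and with every name here (struct provider, struct buf, send_batch_or_fallback()) hypothetical rather than OVS API, the dispatch pattern looks like this:

#include <stdbool.h>
#include <stddef.h>

struct buf;                                  /* stand-in for struct ofpbuf */

/* Hypothetical provider vtable mirroring the two slots touched by the patch. */
struct provider {
    int (*send)(void *dev, struct buf *b, bool may_steal);
    int (*send_batch)(void *dev, struct buf **bufs, int cnt, bool may_steal);
};

/* Prefer the batch hook; fall back to per-packet send when a provider leaves
 * send_batch NULL (as every class did before this patch filled the slot for
 * netdev-dpdk). */
static int
send_batch_or_fallback(const struct provider *p, void *dev,
                       struct buf **bufs, int cnt, bool may_steal)
{
    int i, error = 0;

    if (p->send_batch) {
        return p->send_batch(dev, bufs, cnt, may_steal);
    }
    for (i = 0; i < cnt; i++) {
        int err = p->send(dev, bufs[i], may_steal);
        if (!error) {
            error = err;
        }
    }
    return error;
}

/* The converse, as netdev_dpdk_send() now does: a single packet is just a
 * batch of one, so '&b' forms a one-element array. */
static int
send_one(const struct provider *p, void *dev, struct buf *b, bool may_steal)
{
    return send_batch_or_fallback(p, dev, &b, 1, may_steal);
}

With a fallback of that kind on the caller side, providers can presumably adopt send_batch incrementally, which would be why the slot stays optional in the class struct.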