Split the queue_start operation into first-half and second-half helpers. This allows the queue commands to be batched up during dev_start(), reducing the outage window when restarting the process by about 1 ms per queue.
Signed-off-by: Andrew Boyer <andrew.bo...@amd.com> --- drivers/net/ionic/ionic_lif.c | 178 +++++++++++++++++++++------------ drivers/net/ionic/ionic_lif.h | 6 +- drivers/net/ionic/ionic_rxtx.c | 81 ++++++++++++--- drivers/net/ionic/ionic_rxtx.h | 10 ++ 4 files changed, 194 insertions(+), 81 deletions(-) diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c index 8ffdbc4df7..1937e48d9b 100644 --- a/drivers/net/ionic/ionic_lif.c +++ b/drivers/net/ionic/ionic_lif.c @@ -1598,52 +1598,61 @@ ionic_lif_set_features(struct ionic_lif *lif) } int -ionic_lif_txq_init(struct ionic_tx_qcq *txq) +ionic_lif_txq_init_nowait(struct ionic_tx_qcq *txq) { struct ionic_qcq *qcq = &txq->qcq; struct ionic_queue *q = &qcq->q; struct ionic_lif *lif = qcq->lif; struct ionic_cq *cq = &qcq->cq; - struct ionic_admin_ctx ctx = { - .pending_work = true, - .cmd.q_init = { - .opcode = IONIC_CMD_Q_INIT, - .type = q->type, - .ver = lif->qtype_info[q->type].version, - .index = rte_cpu_to_le_32(q->index), - .flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA), - .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE), - .ring_size = rte_log2_u32(q->num_descs), - .cq_ring_base = rte_cpu_to_le_64(cq->base_pa), - .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa), - }, - }; + struct ionic_admin_ctx *ctx = &txq->admin_ctx; int err; + memset(ctx, 0, sizeof(*ctx)); + ctx->pending_work = true; + ctx->cmd.q_init.opcode = IONIC_CMD_Q_INIT; + ctx->cmd.q_init.type = q->type; + ctx->cmd.q_init.ver = lif->qtype_info[q->type].version; + ctx->cmd.q_init.index = rte_cpu_to_le_32(q->index); + ctx->cmd.q_init.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA); + ctx->cmd.q_init.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE); + ctx->cmd.q_init.ring_size = rte_log2_u32(q->num_descs); + ctx->cmd.q_init.cq_ring_base = rte_cpu_to_le_64(cq->base_pa); + ctx->cmd.q_init.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa); + if (txq->flags & IONIC_QCQ_F_SG) - ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG); + 
ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG); if (txq->flags & IONIC_QCQ_F_CMB) { - ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB); - ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa); + ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB); + ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa); } else { - ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa); + ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa); } IONIC_PRINT(DEBUG, "txq_init.index %d", q->index); IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa); IONIC_PRINT(DEBUG, "txq_init.ring_size %d", - ctx.cmd.q_init.ring_size); - IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver); + ctx->cmd.q_init.ring_size); + IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx->cmd.q_init.ver); ionic_q_reset(q); ionic_cq_reset(cq); - err = ionic_adminq_post_wait(lif, &ctx); + /* Caller responsible for calling ionic_lif_txq_init_done() */ + err = ionic_adminq_post(lif, ctx); if (err) - return err; + ctx->pending_work = false; + return err; +} - q->hw_type = ctx.comp.q_init.hw_type; - q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index); +void +ionic_lif_txq_init_done(struct ionic_tx_qcq *txq) +{ + struct ionic_lif *lif = txq->qcq.lif; + struct ionic_queue *q = &txq->qcq.q; + struct ionic_admin_ctx *ctx = &txq->admin_ctx; + + q->hw_type = ctx->comp.q_init.hw_type; + q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index); q->db = ionic_db_map(lif, q); IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type); @@ -1651,57 +1660,64 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq) IONIC_PRINT(DEBUG, "txq->db %p", q->db); txq->flags |= IONIC_QCQ_F_INITED; - - return 0; } int -ionic_lif_rxq_init(struct ionic_rx_qcq *rxq) +ionic_lif_rxq_init_nowait(struct ionic_rx_qcq *rxq) { struct ionic_qcq *qcq = &rxq->qcq; struct ionic_queue *q = &qcq->q; struct ionic_lif *lif = qcq->lif; struct ionic_cq *cq = &qcq->cq; - struct ionic_admin_ctx ctx = { - .pending_work 
= true, - .cmd.q_init = { - .opcode = IONIC_CMD_Q_INIT, - .type = q->type, - .ver = lif->qtype_info[q->type].version, - .index = rte_cpu_to_le_32(q->index), - .flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA), - .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE), - .ring_size = rte_log2_u32(q->num_descs), - .cq_ring_base = rte_cpu_to_le_64(cq->base_pa), - .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa), - }, - }; + struct ionic_admin_ctx *ctx = &rxq->admin_ctx; int err; + memset(ctx, 0, sizeof(*ctx)); + ctx->pending_work = true; + ctx->cmd.q_init.opcode = IONIC_CMD_Q_INIT; + ctx->cmd.q_init.type = q->type; + ctx->cmd.q_init.ver = lif->qtype_info[q->type].version; + ctx->cmd.q_init.index = rte_cpu_to_le_32(q->index); + ctx->cmd.q_init.flags = rte_cpu_to_le_16(IONIC_QINIT_F_ENA); + ctx->cmd.q_init.intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE); + ctx->cmd.q_init.ring_size = rte_log2_u32(q->num_descs); + ctx->cmd.q_init.cq_ring_base = rte_cpu_to_le_64(cq->base_pa); + ctx->cmd.q_init.sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa); + if (rxq->flags & IONIC_QCQ_F_SG) - ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG); + ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG); if (rxq->flags & IONIC_QCQ_F_CMB) { - ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB); - ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa); + ctx->cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB); + ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->cmb_base_pa); } else { - ctx.cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa); + ctx->cmd.q_init.ring_base = rte_cpu_to_le_64(q->base_pa); } IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index); IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa); IONIC_PRINT(DEBUG, "rxq_init.ring_size %d", - ctx.cmd.q_init.ring_size); - IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver); + ctx->cmd.q_init.ring_size); + IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx->cmd.q_init.ver); ionic_q_reset(q); ionic_cq_reset(cq); - 
err = ionic_adminq_post_wait(lif, &ctx); + /* Caller responsible for calling ionic_lif_rxq_init_done() */ + err = ionic_adminq_post(lif, ctx); if (err) - return err; + ctx->pending_work = false; + return err; +} - q->hw_type = ctx.comp.q_init.hw_type; - q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index); +void +ionic_lif_rxq_init_done(struct ionic_rx_qcq *rxq) +{ + struct ionic_lif *lif = rxq->qcq.lif; + struct ionic_queue *q = &rxq->qcq.q; + struct ionic_admin_ctx *ctx = &rxq->admin_ctx; + + q->hw_type = ctx->comp.q_init.hw_type; + q->hw_index = rte_le_to_cpu_32(ctx->comp.q_init.hw_index); q->db = ionic_db_map(lif, q); rxq->flags |= IONIC_QCQ_F_INITED; @@ -1709,8 +1725,6 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq) IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type); IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index); IONIC_PRINT(DEBUG, "rxq->db %p", q->db); - - return 0; } static int @@ -1959,9 +1973,11 @@ ionic_lif_configure(struct ionic_lif *lif) int ionic_lif_start(struct ionic_lif *lif) { + struct rte_eth_dev *dev = lif->eth_dev; uint32_t rx_mode; - uint32_t i; + uint32_t i, j, chunk; int err; + bool fatal = false; err = ionic_lif_rss_setup(lif); if (err) @@ -1982,25 +1998,57 @@ ionic_lif_start(struct ionic_lif *lif) "on port %u", lif->nrxqcqs, lif->ntxqcqs, lif->port_id); - for (i = 0; i < lif->nrxqcqs; i++) { - struct ionic_rx_qcq *rxq = lif->rxqcqs[i]; - if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) { - err = ionic_dev_rx_queue_start(lif->eth_dev, i); + chunk = ionic_adminq_space_avail(lif); + + for (i = 0; i < lif->nrxqcqs; i += chunk) { + if (lif->rxqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) { + IONIC_PRINT(DEBUG, "Rx queue start deferred"); + break; + } + + for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) { + err = ionic_dev_rx_queue_start_firsthalf(dev, i + j); + if (err) { + fatal = true; + break; + } + } + for (j = 0; j < chunk && i + j < lif->nrxqcqs; j++) { + /* Commands that failed to post return immediately */ + err = 
ionic_dev_rx_queue_start_secondhalf(dev, i + j); if (err) - return err; + /* Don't break */ + fatal = true; } } + if (fatal) + return -EIO; - for (i = 0; i < lif->ntxqcqs; i++) { - struct ionic_tx_qcq *txq = lif->txqcqs[i]; - if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) { - err = ionic_dev_tx_queue_start(lif->eth_dev, i); + for (i = 0; i < lif->ntxqcqs; i += chunk) { + if (lif->txqcqs[0]->flags & IONIC_QCQ_F_DEFERRED) { + IONIC_PRINT(DEBUG, "Tx queue start deferred"); + break; + } + + for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) { + err = ionic_dev_tx_queue_start_firsthalf(dev, i + j); + if (err) { + fatal = true; + break; + } + } + for (j = 0; j < chunk && i + j < lif->ntxqcqs; j++) { + /* Commands that failed to post return immediately */ + err = ionic_dev_tx_queue_start_secondhalf(dev, i + j); if (err) - return err; + /* Don't break */ + fatal = true; } } + if (fatal) + return -EIO; /* Carrier ON here */ lif->state |= IONIC_LIF_F_UP; diff --git a/drivers/net/ionic/ionic_lif.h b/drivers/net/ionic/ionic_lif.h index ee13f5b7c8..591cf1a2ff 100644 --- a/drivers/net/ionic/ionic_lif.h +++ b/drivers/net/ionic/ionic_lif.h @@ -228,11 +228,13 @@ int ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, struct ionic_tx_qcq **qcq_out); void ionic_qcq_free(struct ionic_qcq *qcq); -int ionic_lif_rxq_init(struct ionic_rx_qcq *rxq); +int ionic_lif_rxq_init_nowait(struct ionic_rx_qcq *rxq); +void ionic_lif_rxq_init_done(struct ionic_rx_qcq *rxq); void ionic_lif_rxq_deinit_nowait(struct ionic_rx_qcq *rxq); void ionic_lif_rxq_stats(struct ionic_rx_qcq *rxq); -int ionic_lif_txq_init(struct ionic_tx_qcq *txq); +int ionic_lif_txq_init_nowait(struct ionic_tx_qcq *txq); +void ionic_lif_txq_init_done(struct ionic_tx_qcq *txq); void ionic_lif_txq_deinit_nowait(struct ionic_tx_qcq *txq); void ionic_lif_txq_stats(struct ionic_tx_qcq *txq); diff --git a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c index 774dc596c0..ad04e987eb 100644 --- 
a/drivers/net/ionic/ionic_rxtx.c +++ b/drivers/net/ionic/ionic_rxtx.c @@ -203,27 +203,54 @@ ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, * Start Transmit Units for specified queue. */ int __rte_cold -ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) +ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; - struct ionic_tx_qcq *txq; int err; + err = ionic_dev_tx_queue_start_firsthalf(dev, tx_queue_id); + if (err) + return err; + + return ionic_dev_tx_queue_start_secondhalf(dev, tx_queue_id); +} + +int __rte_cold +ionic_dev_tx_queue_start_firsthalf(struct rte_eth_dev *dev, + uint16_t tx_queue_id) +{ + uint8_t *tx_queue_state = dev->data->tx_queue_state; + struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id]; + if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { IONIC_PRINT(DEBUG, "TX queue %u already started", tx_queue_id); return 0; } - txq = eth_dev->data->tx_queues[tx_queue_id]; - IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", tx_queue_id, txq->qcq.q.num_descs); - err = ionic_lif_txq_init(txq); + return ionic_lif_txq_init_nowait(txq); +} + +int __rte_cold +ionic_dev_tx_queue_start_secondhalf(struct rte_eth_dev *dev, + uint16_t tx_queue_id) +{ + uint8_t *tx_queue_state = dev->data->tx_queue_state; + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev); + struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id]; + int err; + + if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + err = ionic_adminq_wait(lif, &txq->admin_ctx); if (err) return err; + ionic_lif_txq_init_done(txq); + tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; return 0; @@ -680,22 +707,31 @@ ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq) * Start Receive Units for specified queue. 
*/ int __rte_cold -ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; - struct ionic_rx_qcq *rxq; - struct ionic_queue *q; int err; + err = ionic_dev_rx_queue_start_firsthalf(dev, rx_queue_id); + if (err) + return err; + + return ionic_dev_rx_queue_start_secondhalf(dev, rx_queue_id); +} + +int __rte_cold +ionic_dev_rx_queue_start_firsthalf(struct rte_eth_dev *dev, + uint16_t rx_queue_id) +{ + uint8_t *rx_queue_state = dev->data->rx_queue_state; + struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id]; + struct ionic_queue *q = &rxq->qcq.q; + if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { IONIC_PRINT(DEBUG, "RX queue %u already started", rx_queue_id); return 0; } - rxq = eth_dev->data->rx_queues[rx_queue_id]; - q = &rxq->qcq.q; - rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN; /* Recalculate segment count based on MTU */ @@ -707,10 +743,27 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) ionic_rx_init_descriptors(rxq); - err = ionic_lif_rxq_init(rxq); + return ionic_lif_rxq_init_nowait(rxq); +} + +int __rte_cold +ionic_dev_rx_queue_start_secondhalf(struct rte_eth_dev *dev, + uint16_t rx_queue_id) +{ + uint8_t *rx_queue_state = dev->data->rx_queue_state; + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev); + struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id]; + int err; + + if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + err = ionic_adminq_wait(lif, &rxq->admin_ctx); if (err) return err; + ionic_lif_rxq_init_done(rxq); + /* Allocate buffers for descriptor ring */ if (rxq->flags & IONIC_QCQ_F_SG) err = ionic_rx_fill_sg(rxq); diff --git a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h index 7ca23178cc..a342afec54 100644 --- a/drivers/net/ionic/ionic_rxtx.h +++ 
b/drivers/net/ionic/ionic_rxtx.h @@ -46,6 +46,16 @@ void ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); int ionic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +/* Helpers for optimized dev_start() */ +int ionic_dev_rx_queue_start_firsthalf(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +int ionic_dev_rx_queue_start_secondhalf(struct rte_eth_dev *dev, + uint16_t rx_queue_id); +int ionic_dev_tx_queue_start_firsthalf(struct rte_eth_dev *dev, + uint16_t tx_queue_id); +int ionic_dev_tx_queue_start_secondhalf(struct rte_eth_dev *dev, + uint16_t tx_queue_id); + /* Helpers for optimized dev_stop() */ void ionic_dev_rx_queue_stop_firsthalf(struct rte_eth_dev *dev, uint16_t rx_queue_id); -- 2.17.1