This patch does the following for the fm10k PF and VF:
- Set up the NIC to generate MSI-X interrupts
- Program the RXINT registers to map interrupt causes to vectors
- Implement the interrupt enable/disable functions

Signed-off-by: Shaopeng He <shaopeng.he@intel.com>
---
 drivers/net/fm10k/fm10k_ethdev.c | 147 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 140 insertions(+), 7 deletions(-)
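
A minimal sketch of the application-side loop these hooks enable, modeled
on DPDK's l3fwd-power example rather than on anything in this patch. It
assumes the port was configured with dev_conf.intr_conf.rxq = 1 on a
VFIO-bound device; port_id, queue_id, BURST_SIZE and rx_intr_loop() are
illustrative names only:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

#define BURST_SIZE 32	/* illustrative burst size */

static void
rx_intr_loop(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	struct rte_epoll_event ev;

	/* register the queue's interrupt eventfd with the per-thread
	 * epoll instance
	 */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);

	for (;;) {
		uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id,
				bufs, BURST_SIZE);
		if (nb_rx == 0) {
			/* queue is idle: unmask its (VF)ITR entry and
			 * sleep until the MSI-X vector fires
			 */
			rte_eth_dev_rx_intr_enable(port_id, queue_id);
			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
			rte_eth_dev_rx_intr_disable(port_id, queue_id);
			continue;
		}
		/* ... process and free the nb_rx packets ... */
	}
}

The rte_eth_dev_rx_intr_enable/disable calls land in the new
fm10k_dev_rx_queue_intr_enable/_disable ops below, which clear or set the
mask bit of the vector's ITR (PF) or VFITR (VF) register.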

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a82cd59..6648934 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -52,6 +52,8 @@
 /* Number of chars per uint32 type */
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
+/* default 1:1 map from queue ID to interrupt vector ID */
+#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])

 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
@@ -67,6 +69,8 @@ static void
 fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev);
 static void fm10k_tx_queue_release(void *queue);
 static void fm10k_rx_queue_release(void *queue);
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);

 static void
 fm10k_mbx_initlock(struct fm10k_hw *hw)
@@ -406,6 +410,7 @@ static int
 fm10k_dev_rx_init(struct rte_eth_dev *dev)
 {
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
@@ -413,10 +418,23 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
        uint16_t buf_size;

-       /* Disable RXINT to avoid possible interrupt */
-       for (i = 0; i < hw->mac.max_queues; i++)
+       /* enable RXINT for interrupt mode */
+       i = 0;
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (; i < dev->data->nb_rx_queues; i++) {
+                       FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
+                       if (hw->mac.type == fm10k_mac_pf)
+                               FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+                                       FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+                       else
+                               FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+                                       FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+               }
+       }
+       /* Disable other RXINT to avoid possible interrupt */
+       for (; i < hw->mac.max_queues; i++)
                FM10K_WRITE_REG(hw, FM10K_RXINT(i),
-                               3 << FM10K_RXINT_TIMER_SHIFT);
+                       3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
@@ -741,6 +759,9 @@ fm10k_dev_start(struct rte_eth_dev *dev)
                return diag;
        }

+       if (fm10k_dev_rxq_interrupt_setup(dev))
+               return -EIO;
+
        diag = fm10k_dev_rx_init(dev);
        if (diag) {
                PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
@@ -804,6 +825,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 static void
 fm10k_dev_stop(struct rte_eth_dev *dev)
 {
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i;

        PMD_INIT_FUNC_TRACE();
@@ -815,6 +838,26 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        fm10k_dev_rx_queue_stop(dev, i);
+
+       /* Disable datapath event */
+       if (rte_intr_dp_is_en(intr_handle)) {
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       FM10K_WRITE_REG(hw, FM10K_RXINT(i),
+                               3 << FM10K_RXINT_TIMER_SHIFT);
+                       if (hw->mac.type == fm10k_mac_pf)
+                               FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+                                       FM10K_ITR_MASK_SET);
+                       else
+                               FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+                                       FM10K_ITR_MASK_SET);
+               }
+       }
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec != NULL) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }

 static void
@@ -1778,6 +1821,94 @@ fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
 }

 static int
+fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Enable ITR */
+       if (hw->mac.type == fm10k_mac_pf)
+               FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+                       FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+       else
+               FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+                       FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
+       rte_intr_enable(&dev->pci_dev->intr_handle);
+       return 0;
+}
+
+static int
+fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Disable ITR */
+       if (hw->mac.type == fm10k_mac_pf)
+               FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+                       FM10K_ITR_MASK_SET);
+       else
+               FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+                       FM10K_ITR_MASK_SET);
+       return 0;
+}
+
+static int
+fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t intr_vector, vec;
+       uint16_t queue_id;
+       int result = 0;
+
+       /* fm10k needs interrupt for mailbox
+        * so igb_uio is not supported for rx interrupt
+        */
+       if (!rte_intr_cap_multiple(intr_handle) ||
+                       dev->data->dev_conf.intr_conf.rxq == 0)
+               return result;
+
+       intr_vector = dev->data->nb_rx_queues;
+
+       /* disable interrupt first */
+       rte_intr_disable(&dev->pci_dev->intr_handle);
+       if (hw->mac.type == fm10k_mac_pf)
+               fm10k_dev_disable_intr_pf(dev);
+       else
+               fm10k_dev_disable_intr_vf(dev);
+
+       if (rte_intr_efd_enable(intr_handle, intr_vector)) {
+               PMD_INIT_LOG(ERR, "Failed to init event fd");
+               result = -EIO;
+       }
+
+       if (rte_intr_dp_is_en(intr_handle) && !result) {
+               intr_handle->intr_vec = rte_zmalloc("intr_vec",
+                       dev->data->nb_rx_queues * sizeof(int), 0);
+               if (intr_handle->intr_vec) {
+                       for (queue_id = 0, vec = RX_VEC_START;
+                                       queue_id < dev->data->nb_rx_queues;
+                                       queue_id++) {
+                               intr_handle->intr_vec[queue_id] = vec;
+                               if (vec < intr_handle->nb_efd - 1 + RX_VEC_START)
+                                       vec++;
+                       }
+               } else {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                               " intr_vec", dev->data->nb_rx_queues);
+                       result = -ENOMEM;
+               }
+       }
+
+       if (hw->mac.type == fm10k_mac_pf)
+               fm10k_dev_enable_intr_pf(dev);
+       else
+               fm10k_dev_enable_intr_vf(dev);
+       rte_intr_enable(&dev->pci_dev->intr_handle);
+       hw->mac.ops.update_int_moderator(hw);
+       return result;
+}
+
+static int
 fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
 {
        struct fm10k_fault fault;
@@ -2050,6 +2181,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
        .tx_queue_setup         = fm10k_tx_queue_setup,
        .tx_queue_release       = fm10k_tx_queue_release,
        .rx_descriptor_done     = fm10k_dev_rx_descriptor_done,
+       .rx_queue_intr_enable   = fm10k_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable  = fm10k_dev_rx_queue_intr_disable,
        .reta_update            = fm10k_reta_update,
        .reta_query             = fm10k_reta_query,
        .rss_hash_update        = fm10k_rss_hash_update,
@@ -2060,7 +2193,7 @@ static int
 eth_fm10k_dev_init(struct rte_eth_dev *dev)
 {
        struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       int diag;
+       int diag, i;
        struct fm10k_macvlan_filter_info *macvlan;

        PMD_INIT_FUNC_TRACE();
@@ -2177,7 +2310,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
                fm10k_dev_enable_intr_vf(dev);
        }

-       /* Enable uio intr after callback registered */
+       /* Enable intr after callback registered */
        rte_intr_enable(&(dev->pci_dev->intr_handle));

        hw->mac.ops.update_int_moderator(hw);
@@ -2185,7 +2318,6 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        /* Make sure Switch Manager is ready before going forward. */
        if (hw->mac.type == fm10k_mac_pf) {
                int switch_ready = 0;
-               int i;

                for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
                        fm10k_mbx_lock(hw);
@@ -2291,7 +2423,8 @@ static struct eth_driver rte_pmd_fm10k = {
        .pci_drv = {
                .name = "rte_pmd_fm10k",
                .id_table = pci_id_fm10k_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                       RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_fm10k_dev_init,
        .eth_dev_uninit = eth_fm10k_dev_uninit,
-- 
1.9.3
