For optimum RX & TX queue processing on the same IRQ, we should use
irq_set_affinity_hint() to steer each RXQ and TXQ IRQ pair to the same CPU.
This will benefit the upcoming XDP TX and XDP TX ZC processing.

>+static int stmmac_config_multi_msi(struct pci_dev *pdev,
>+				    struct plat_stmmacenet_data *plat,
>+				    struct stmmac_resources *res)
>+{
	cpumask_t cpu_mask;
>+	int ret;
>+	int i;
>+
>+	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
>+	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
>+		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
>+			 __func__);
>+		return -1;
>+	}
>+
>+	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
>+				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
>+	if (ret < 0) {
>+		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
>+			 __func__);
>+		return ret;
>+	}
>+
>+	/* For RX MSI */
>+	for (i = 0; i < plat->rx_queues_to_use; i++) {
>+		res->rx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_rx_base_vec + i * 2);
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->rx_irq[i], &cpu_mask);
>+	}
>+
>+	/* For TX MSI */
>+	for (i = 0; i < plat->tx_queues_to_use; i++) {
>+		res->tx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_tx_base_vec + i * 2);
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->tx_irq[i], &cpu_mask);
>+	}
>+
>+	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
>+		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
>+	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
>+		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
>+	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
>+		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
>+	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
>+	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
>+
>+	plat->multi_msi_en = 1;
>+	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
>+
>+	return 0;
>+}
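One thing worth keeping in mind: irq_set_affinity_hint() stores the cpumask
pointer it is given rather than copying the mask, so hints installed at config
time should be cleared with a NULL mask on the teardown path, before the
vectors are released. A minimal sketch of such a cleanup is below; it is not
part of the quoted patch, the helper name stmmac_free_multi_msi() is
hypothetical, and it assumes the same plat->msi_*_vec layout used above.

static void stmmac_free_multi_msi(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	int i;

	/* Drop the affinity hints installed at config time so that
	 * /proc/irq/<n>/affinity_hint no longer references the old mask.
	 */
	for (i = 0; i < plat->rx_queues_to_use; i++)
		irq_set_affinity_hint(pci_irq_vector(pdev,
						     plat->msi_rx_base_vec + i * 2),
				      NULL);

	for (i = 0; i < plat->tx_queues_to_use; i++)
		irq_set_affinity_hint(pci_irq_vector(pdev,
						     plat->msi_tx_base_vec + i * 2),
				      NULL);

	/* Release the vectors obtained from pci_alloc_irq_vectors() */
	pci_free_irq_vectors(pdev);
}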