From: Danylo Vodopianov <dvo-...@napatech.com> Adds support for queue configure, start, stop, and release operations. The internal macros and functions of ntnic were also added and initialized.
Signed-off-by: Danylo Vodopianov <dvo-...@napatech.com> --- doc/guides/nics/features/ntnic.ini | 1 + drivers/net/ntnic/include/ntos_drv.h | 34 +++++ drivers/net/ntnic/ntnic_ethdev.c | 208 +++++++++++++++++++++++++++ drivers/net/ntnic/ntutil/nt_util.c | 10 ++ drivers/net/ntnic/ntutil/nt_util.h | 2 + 5 files changed, 255 insertions(+) diff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini index b6d92c7ee1..8b9b87bdfe 100644 --- a/doc/guides/nics/features/ntnic.ini +++ b/doc/guides/nics/features/ntnic.ini @@ -7,6 +7,7 @@ FW version = Y Speed capabilities = Y Link status = Y +Queue start/stop = Y Unicast MAC filter = Y Multicast MAC filter = Y Linux = Y diff --git a/drivers/net/ntnic/include/ntos_drv.h b/drivers/net/ntnic/include/ntos_drv.h index 67baf9fe0c..a77e6a0247 100644 --- a/drivers/net/ntnic/include/ntos_drv.h +++ b/drivers/net/ntnic/include/ntos_drv.h @@ -13,6 +13,7 @@ #include <rte_ether.h> +#include "stream_binary_flow_api.h" #include "nthw_drv.h" #define NUM_MAC_ADDRS_PER_PORT (16U) @@ -21,6 +22,32 @@ #define NUM_ADAPTER_MAX (8) #define NUM_ADAPTER_PORTS_MAX (128) + +/* Max RSS queues */ +#define MAX_QUEUES 125 + +/* Structs: */ +struct __rte_cache_aligned ntnic_rx_queue { + struct flow_queue_id_s queue; /* queue info - user id and hw queue index */ + struct rte_mempool *mb_pool; /* mbuf memory pool */ + uint16_t buf_size; /* Size of data area in mbuf */ + int enabled; /* Enabling/disabling of this queue */ + + nt_meta_port_type_t type; + uint32_t port; /* Rx port for this queue */ + enum fpga_info_profile profile; /* Inline / Capture */ + +}; + +struct __rte_cache_aligned ntnic_tx_queue { + struct flow_queue_id_s queue; /* queue info - user id and hw queue index */ + nt_meta_port_type_t type; + + uint32_t port; /* Tx port for this queue */ + int enabled; /* Enabling/disabling of this queue */ + enum fpga_info_profile profile; /* Inline / Capture */ +}; + struct pmd_internals { const struct rte_pci_device *pci_dev; char 
name[20]; @@ -30,7 +57,14 @@ struct pmd_internals { unsigned int nb_tx_queues; /* Offset of the VF from the PF */ uint8_t vf_offset; + uint32_t port; nt_meta_port_type_t type; + struct flow_queue_id_s vpq[MAX_QUEUES]; + unsigned int vpq_nb_vq; + /* Array of Rx queues */ + struct ntnic_rx_queue rxq_scg[MAX_QUEUES]; + /* Array of Tx queues */ + struct ntnic_tx_queue txq_scg[MAX_QUEUES]; struct drv_s *p_drv; /* Ethernet (MAC) addresses. Element number zero denotes default address. */ struct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT]; diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c index 5af18a3b27..967e989575 100644 --- a/drivers/net/ntnic/ntnic_ethdev.c +++ b/drivers/net/ntnic/ntnic_ethdev.c @@ -3,12 +3,17 @@ * Copyright(c) 2023 Napatech A/S */ +#include <stdint.h> + #include <rte_eal.h> #include <rte_dev.h> #include <rte_vfio.h> #include <rte_ethdev.h> #include <rte_bus_pci.h> #include <ethdev_pci.h> +#include <rte_kvargs.h> + +#include <sys/queue.h> #include "ntlog.h" #include "ntdrv_4ga.h" @@ -24,6 +29,23 @@ #define EXCEPTION_PATH_HID 0 +#define MAX_TOTAL_QUEUES 128 + +/* Max RSS queues */ +#define MAX_QUEUES 125 + +#define ETH_DEV_NTNIC_HELP_ARG "help" +#define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs" +#define ETH_DEV_NTHW_TXQUEUES_ARG "txqs" + +static const char *const valid_arguments[] = { + ETH_DEV_NTNIC_HELP_ARG, + ETH_DEV_NTHW_RXQUEUES_ARG, + ETH_DEV_NTHW_TXQUEUES_ARG, + NULL, +}; + + static const struct rte_pci_id nthw_pci_id_map[] = { { RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) }, { @@ -161,6 +183,58 @@ eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info return 0; } +static void eth_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + (void)eth_dev; + (void)queue_id; +} + +static void eth_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + (void)eth_dev; + (void)queue_id; +} + +static int num_queues_alloced; + +/* Returns num queue 
starting at returned queue num or -1 on fail */ +static int allocate_queue(int num) +{ + int next_free = num_queues_alloced; + NT_LOG_DBGX(DBG, NTNIC, "num_queues_alloced=%u, New queues=%u, Max queues=%u\n", + num_queues_alloced, num, MAX_TOTAL_QUEUES); + + if (num_queues_alloced + num > MAX_TOTAL_QUEUES) + return -1; + + num_queues_alloced += num; + return next_free; +} + +static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; +} + +static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; +} + +static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) +{ + eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + static int eth_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, @@ -247,6 +321,15 @@ eth_dev_start(struct rte_eth_dev *eth_dev) NT_LOG_DBGX(DBG, NTNIC, "Port %u\n", internals->n_intf_no); + /* Start queues */ + uint q; + + for (q = 0; q < internals->nb_rx_queues; q++) + eth_rx_queue_start(eth_dev, q); + + for (q = 0; q < internals->nb_tx_queues; q++) + eth_tx_queue_start(eth_dev, q); + if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) { eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; @@ -296,6 +379,16 @@ eth_dev_stop(struct rte_eth_dev *eth_dev) NT_LOG_DBGX(DBG, NTNIC, "Port %u\n", internals->n_intf_no); + if (internals->type != PORT_TYPE_VIRTUAL) { + uint q; + + for (q = 0; q < internals->nb_rx_queues; q++) + eth_rx_queue_stop(eth_dev, q); + + for (q = 0; q < internals->nb_tx_queues; q++) + eth_tx_queue_stop(eth_dev, q); + } + 
eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; return 0; } @@ -438,6 +531,12 @@ static const struct eth_dev_ops nthw_eth_dev_ops = { .link_update = eth_link_update, .dev_infos_get = eth_dev_infos_get, .fw_version_get = eth_fw_version_get, + .rx_queue_start = eth_rx_queue_start, + .rx_queue_stop = eth_rx_queue_stop, + .rx_queue_release = eth_rx_queue_release, + .tx_queue_start = eth_tx_queue_start, + .tx_queue_stop = eth_tx_queue_stop, + .tx_queue_release = eth_tx_queue_release, .mac_addr_add = eth_mac_addr_add, .mac_addr_set = eth_mac_addr_set, .set_mc_addr_list = eth_set_mc_addr_list, @@ -462,6 +561,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) return -1; } + int res; struct drv_s *p_drv; ntdrv_4ga_t *p_nt_drv; hw_info_t *p_hw_info; @@ -469,6 +569,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) uint32_t n_port_mask = -1; /* All ports enabled by default */ uint32_t nb_rx_queues = 1; uint32_t nb_tx_queues = 1; + struct flow_queue_id_s queue_ids[MAX_QUEUES]; int n_phy_ports; struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 }; int num_port_speeds = 0; @@ -476,6 +577,81 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) pci_dev->addr.function, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); + /* + * Process options/arguments + */ + if (pci_dev->device.devargs && pci_dev->device.devargs->args) { + int kvargs_count; + struct rte_kvargs *kvlist = + rte_kvargs_parse(pci_dev->device.devargs->args, valid_arguments); + + if (kvlist == NULL) + return -1; + + /* + * Argument: help + * NOTE: this argument/option check should be the first as it will stop + * execution after producing its output + */ + { + if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) { + size_t i; + + for (i = 0; i < RTE_DIM(valid_arguments); i++) + if (valid_arguments[i] == NULL) + break; + + exit(0); + } + } + + /* + * rxq option/argument + * The number of rxq (hostbuffers) allocated in memory. 
+ * Default is 32 RX Hostbuffers + */ + kvargs_count = rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG); + + if (kvargs_count != 0) { + assert(kvargs_count == 1); + res = rte_kvargs_process(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG, &string_to_u32, + &nb_rx_queues); + + if (res < 0) { + NT_LOG_DBGX(ERR, NTNIC, + "problem with command line arguments: res=%d\n", + res); + return -1; + } + + NT_LOG_DBGX(DBG, NTNIC, "devargs: %s=%u\n", + ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues); + } + + /* + * txq option/argument + * The number of txq (hostbuffers) allocated in memory. + * Default is 32 TX Hostbuffers + */ + kvargs_count = rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG); + + if (kvargs_count != 0) { + assert(kvargs_count == 1); + res = rte_kvargs_process(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG, &string_to_u32, + &nb_tx_queues); + + if (res < 0) { + NT_LOG_DBGX(ERR, NTNIC, + "problem with command line arguments: res=%d\n", + res); + return -1; + } + + NT_LOG_DBGX(DBG, NTNIC, "devargs: %s=%u\n", + ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues); + } + } + /* alloc */ p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s), RTE_CACHE_LINE_SIZE, @@ -581,6 +757,7 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) struct pmd_internals *internals = NULL; struct rte_eth_dev *eth_dev = NULL; char name[32]; + int i; if ((1 << n_intf_no) & ~n_port_mask) { NT_LOG_DBGX(DBG, NTNIC, @@ -608,6 +785,10 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) internals->nb_rx_queues = nb_rx_queues; internals->nb_tx_queues = nb_tx_queues; + /* Not used queue index as dest port in bypass - use 0x80 + port nr */ + for (i = 0; i < MAX_QUEUES; i++) + internals->vpq[i].hw_id = -1; + /* Setup queue_ids */ if (nb_rx_queues > 1) { @@ -622,6 +803,33 @@ nthw_pci_dev_init(struct rte_pci_device *pci_dev) internals->n_intf_no, nb_tx_queues); } + int max_num_queues = (nb_rx_queues > nb_tx_queues) ? 
/*
 * rte_kvargs_process() handler: parse a devargs value string into a uint32_t.
 *
 * key_str:    kvargs key (unused; kept to match the arg_handler_t signature).
 * value_str:  textual value; base is auto-detected ("0x.." hex, "0.." octal).
 * extra_args: points to the uint32_t that receives the parsed value.
 *
 * Returns 0 on success, -1 on NULL input or when value_str is not a
 * complete, in-range, non-negative number.  The previous strtol() call
 * checked nothing, so trailing garbage ("12abc"), negative values and
 * out-of-range input were silently accepted.
 */
int string_to_u32(const char *key_str, const char *value_str, void *extra_args)
{
	(void)key_str;

	if (!value_str || !extra_args)
		return -1;

	/* Reject negative input up front: strtoul() would silently wrap it. */
	const char *p = value_str;

	while (isspace((unsigned char)*p))
		p++;

	if (*p == '-')
		return -1;

	char *end = NULL;

	errno = 0;
	const unsigned long value = strtoul(value_str, &end, 0);

	/* Require a fully-consumed, in-range number. */
	if (end == value_str || *end != '\0' || errno == ERANGE || value > UINT32_MAX)
		return -1;

	*(uint32_t *)extra_args = (uint32_t)value;
	return 0;
}
*extra_args); + #endif /* NTOSS_SYSTEM_NT_UTIL_H */ -- 2.45.0