[dpdk-dev] [PATCH] ethdev: allow multiple security sessions to use one rte flow
The rte_security API which enables the inline protocol/crypto feature mandates that an rte_flow is created for every security session. This would internally translate to a rule in the hardware which would do packet classification. In rte_security, one SA would be one security session. If an rte_flow needs to be created for every session, the number of SAs supported by an inline implementation would be limited by the number of rte_flows the PMD is able to support. If the SPI and IP address fields are allowed to be a range, this limitation can be overcome. Multiple flows will be able to use one rule for SECURITY processing. In this case, the security session provided as conf would be NULL. The application should call rte_flow_validate() to make sure the flow is supported by the PMD. Signed-off-by: Anoob Joseph --- lib/librte_ethdev/rte_flow.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h index 452d359..21fa7ed 100644 --- a/lib/librte_ethdev/rte_flow.h +++ b/lib/librte_ethdev/rte_flow.h @@ -2239,6 +2239,12 @@ struct rte_flow_action_meter { * direction. * * Multiple flows can be configured to use the same security session. + * + * The NULL value is allowed for the security session. If the security session + * is NULL, the SPI field in the ESP flow item and the IP addresses in the + * 'IPv4' and 'IPv6' flow items are allowed to be a range. The rule thus + * created can enable SECURITY processing on multiple flows. + * */ struct rte_flow_action_security { void *security_session; /**< Pointer to security session structure. */ -- 2.7.4
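For illustration, a minimal application-side sketch (not part of the patch) of how one such wide rule could be expressed once the NULL session is permitted. The port id, the 0-1023 SPI range and the wildcarded IP addresses are placeholder assumptions, and error handling is reduced to the rte_flow_validate() check the commit message asks for.

#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
create_wide_security_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match ESP with SPI 0..1023: spec + last express the range. */
	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = rte_cpu_to_be_32(0) } };
	struct rte_flow_item_esp esp_last = {
		.hdr = { .spi = rte_cpu_to_be_32(1023) } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* addresses left wild */
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec,
		  .last = &esp_last, .mask = &rte_flow_item_esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* NULL session: SECURITY processing applies to every matching flow. */
	struct rte_flow_action_security sec = { .security_session = NULL };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = &sec },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return -1; /* PMD cannot honour the wide rule */

	return rte_flow_create(port_id, &attr, pattern, actions, err) != NULL ?
	       0 : -1;
}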
[dpdk-dev] [PATCH 2/3] drivers: use structure marker typedef in eal
From: Jerin Jacob Use new marker typedef available in EAL. Signed-off-by: Jerin Jacob --- drivers/net/ark/ark_ethdev_rx.c | 2 +- drivers/net/ark/ark_ethdev_tx.c | 2 +- drivers/net/octeontx2/otx2_ethdev.h | 6 +++--- drivers/net/thunderx/nicvf_struct.h | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c index 6156730bb..4d518d558 100644 --- a/drivers/net/ark/ark_ethdev_rx.c +++ b/drivers/net/ark/ark_ethdev_rx.c @@ -57,7 +57,7 @@ struct ark_rx_queue { /* separate cache line */ /* second cache line - fields only used in slow path */ - MARKER cacheline1 __rte_cache_min_aligned; + RTE_MARKER cacheline1 __rte_cache_min_aligned; volatile uint32_t prod_index; /* step 2 filled by FPGA */ } __rte_cache_aligned; diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c index 08bcf431a..289668774 100644 --- a/drivers/net/ark/ark_ethdev_tx.c +++ b/drivers/net/ark/ark_ethdev_tx.c @@ -42,7 +42,7 @@ struct ark_tx_queue { uint32_t pad[1]; /* second cache line - fields only used in slow path */ - MARKER cacheline1 __rte_cache_min_aligned; + RTE_MARKER cacheline1 __rte_cache_min_aligned; uint32_t cons_index;/* hw is done, can be freed */ } __rte_cache_aligned; diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h index 987e7607c..a6e8483a0 100644 --- a/drivers/net/octeontx2/otx2_ethdev.h +++ b/drivers/net/octeontx2/otx2_ethdev.h @@ -254,7 +254,7 @@ struct otx2_vlan_info { struct otx2_eth_dev { OTX2_DEV; /* Base class */ - MARKER otx2_eth_dev_data_start; + RTE_MARKER otx2_eth_dev_data_start; uint16_t sqb_size; uint16_t rx_chan_base; uint16_t tx_chan_base; @@ -335,7 +335,7 @@ struct otx2_eth_txq { rte_iova_t fc_iova; uint16_t sqes_per_sqb_log2; int16_t nb_sqb_bufs_adj; - MARKER slow_path_start; + RTE_MARKER slow_path_start; uint16_t nb_sqb_bufs; uint16_t sq; uint64_t offloads; @@ -357,7 +357,7 @@ struct otx2_eth_rxq { uint32_t available; uint16_t rq; struct otx2_timesync_info *tstamp; - MARKER slow_path_start; + RTE_MARKER slow_path_start; uint64_t aura; uint64_t offloads; uint32_t qlen; diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h index 5d1379803..cf1c281a0 100644 --- a/drivers/net/thunderx/nicvf_struct.h +++ b/drivers/net/thunderx/nicvf_struct.h @@ -55,7 +55,7 @@ union mbuf_initializer { }; struct nicvf_rxq { - MARKER rxq_fastpath_data_start; + RTE_MARKER rxq_fastpath_data_start; uint8_t rbptr_offset; uint16_t rx_free_thresh; uint32_t head; @@ -69,7 +69,7 @@ struct nicvf_rxq { struct rte_mempool *pool; union cq_entry_t *desc; union mbuf_initializer mbuf_initializer; - MARKER rxq_fastpath_data_end; + RTE_MARKER rxq_fastpath_data_end; uint8_t rx_drop_en; uint16_t precharge_cnt; uint16_t port_id; -- 2.24.0
[dpdk-dev] [PATCH 3/3] mbuf: use structure marker typedef in eal
From: Jerin Jacob Use new marker typedef available in EAL and remove private marker typedef. Signed-off-by: Jerin Jacob --- lib/librte_mbuf/rte_mbuf_core.h | 21 - 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/lib/librte_mbuf/rte_mbuf_core.h b/lib/librte_mbuf/rte_mbuf_core.h index 9a8557d1c..b9a59c879 100644 --- a/lib/librte_mbuf/rte_mbuf_core.h +++ b/lib/librte_mbuf/rte_mbuf_core.h @@ -406,19 +406,6 @@ extern "C" { #defineRTE_MBUF_DEFAULT_BUF_SIZE \ (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM) -/* - * define a set of marker types that can be used to refer to set points in the - * mbuf. - */ -__extension__ -typedef void*MARKER[0]; /**< generic marker for a point in a structure */ -__extension__ -typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */ - - /** marker that allows us to overwrite 8 bytes with a single assignment */ -__extension__ -typedef uint64_t MARKER64[0]; - struct rte_mbuf_sched { uint32_t queue_id; /**< Queue ID. */ uint8_t traffic_class; @@ -478,7 +465,7 @@ enum { * The generic rte_mbuf, containing a packet mbuf. */ struct rte_mbuf { - MARKER cacheline0; + RTE_MARKER cacheline0; void *buf_addr; /**< Virtual address of segment buffer. */ /** @@ -494,7 +481,7 @@ struct rte_mbuf { } __rte_aligned(sizeof(rte_iova_t)); /* next 8 bytes are initialised on RX descriptor rearm */ - MARKER64 rearm_data; + RTE_MARKER64 rearm_data; uint16_t data_off; /** @@ -522,7 +509,7 @@ struct rte_mbuf { uint64_t ol_flags;/**< Offload features. */ /* remaining bytes are set on RX when pulling packet from descriptor */ - MARKER rx_descriptor_fields1; + RTE_MARKER rx_descriptor_fields1; /* * The packet type, which is the combination of outer/inner L2, L3, L4 @@ -610,7 +597,7 @@ struct rte_mbuf { uint64_t timestamp; /* second cache line - fields only used in slow path or on TX */ - MARKER cacheline1 __rte_cache_min_aligned; + RTE_MARKER cacheline1 __rte_cache_min_aligned; RTE_STD_C11 union { -- 2.24.0
[dpdk-dev] [PATCH 1/3] eal: introduce structure marker typedefs
From: Jerin Jacob Introduce EAL typedefs for structure marking at 1B, 2B, 4B and 8B alignment, and a generic marker for a point in a structure. Signed-off-by: Jerin Jacob --- lib/librte_eal/common/include/rte_common.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h index 459d082d1..9556299ba 100644 --- a/lib/librte_eal/common/include/rte_common.h +++ b/lib/librte_eal/common/include/rte_common.h @@ -335,6 +335,18 @@ typedef uint64_t phys_addr_t; typedef uint64_t rte_iova_t; #define RTE_BAD_IOVA ((rte_iova_t)-1) +/*** Structure alignment markers ***/ + +/** Generic marker for a point in a structure. */ +__extension__ typedef void *RTE_MARKER[0]; +/** Marker for 1B alignment in a structure. */ +__extension__ typedef uint8_t RTE_MARKER8[0]; +/** Marker for 2B alignment in a structure. */ +__extension__ typedef uint16_t RTE_MARKER16[0]; +/** Marker for 4B alignment in a structure. */ +__extension__ typedef uint32_t RTE_MARKER32[0]; +/** Marker for 8B alignment in a structure. */ +__extension__ typedef uint64_t RTE_MARKER64[0]; /** * Combines 32b inputs most significant set bits into the least -- 2.24.0
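A minimal, self-contained illustration (not from the patch) of what these zero-length markers buy: they name a byte offset inside a structure without occupying space, so a group of small fields can be rewritten with a single wide store, much like the mbuf rearm path does. The struct and helper below are invented for the example.

#include <stdint.h>
#include <string.h>

__extension__ typedef uint64_t RTE_MARKER64[0];

struct queue_entry {
	uint32_t head;
	uint32_t tail;
	RTE_MARKER64 rearm_data; /* names the start of the next 8 bytes */
	uint16_t data_off;
	uint16_t refcnt;
	uint16_t nb_segs;
	uint16_t port;
};

static inline void
queue_entry_rearm(struct queue_entry *e, uint64_t template)
{
	/* One 8B store re-initialises the four 16-bit fields after the marker. */
	memcpy(&e->rearm_data, &template, sizeof(template));
}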
[dpdk-dev] [PATCH 02/15] common/octeontx2: add routine to check if rte_eth_dev belongs to otx2
From: Vamsi Attunuru This routine returns true if given rte_eth_dev belongs to octeontx2 Signed-off-by: Anoob Joseph Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/common/octeontx2/otx2_common.c | 20 drivers/common/octeontx2/otx2_common.h | 2 ++ .../octeontx2/rte_common_octeontx2_version.map | 1 + 3 files changed, 23 insertions(+) diff --git a/drivers/common/octeontx2/otx2_common.c b/drivers/common/octeontx2/otx2_common.c index 7e45366..116db0f 100644 --- a/drivers/common/octeontx2/otx2_common.c +++ b/drivers/common/octeontx2/otx2_common.c @@ -3,6 +3,7 @@ */ #include +#include #include #include @@ -23,6 +24,25 @@ otx2_npa_set_defaults(struct otx2_idev_cfg *idev) /** * @internal + * Check if rte_eth_dev is otx2_eth_dev + */ +uint8_t +otx2_is_ethdev(struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF || + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF || + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF) + return 1; + + return 0; +} + +/** + * @internal * Get intra device config structure. */ struct otx2_idev_cfg * diff --git a/drivers/common/octeontx2/otx2_common.h b/drivers/common/octeontx2/otx2_common.h index f62c45d..d32e59a 100644 --- a/drivers/common/octeontx2/otx2_common.h +++ b/drivers/common/octeontx2/otx2_common.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -70,6 +71,7 @@ struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void); void otx2_sso_pf_func_set(uint16_t sso_pf_func); uint16_t otx2_sso_pf_func_get(void); uint16_t otx2_npa_pf_func_get(void); +uint8_t otx2_is_ethdev(struct rte_eth_dev *eth_dev); struct otx2_npa_lf *otx2_npa_lf_obj_get(void); void otx2_npa_set_defaults(struct otx2_idev_cfg *idev); int otx2_npa_lf_active(void *dev); diff --git a/drivers/common/octeontx2/rte_common_octeontx2_version.map b/drivers/common/octeontx2/rte_common_octeontx2_version.map index adad21a..dac2283 100644 --- a/drivers/common/octeontx2/rte_common_octeontx2_version.map +++ b/drivers/common/octeontx2/rte_common_octeontx2_version.map @@ -6,6 +6,7 @@ DPDK_20.0 { otx2_dev_priv_init; otx2_disable_irqs; otx2_intra_dev_get_cfg; + otx2_is_ethdev; otx2_logtype_base; otx2_logtype_dpi; otx2_logtype_mbox; -- 2.7.4
[dpdk-dev] [PATCH 00/15] add OCTEONTX2 inline IPsec support
This series adds inline IPsec support in OCTEONTX2 PMD. In the inbound path, rte_flow framework need to be used to configure the NPC block, which does the h/w lookup. The packets would get processed by the crypto block and would submit to the scheduling block, SSO. So inline IPsec mode can be enabled only when traffic is received via event device using Rx adapter. In the outbound path, the core would submit to the crypto block and the crypto block would submit the packet for Tx internally. Ankur Dwivedi (3): crypto/octeontx2: add eth security capabilities crypto/octeontx2: add datapath ops in eth security ctx crypto/octeontx2: add inline tx path changes Anoob Joseph (4): common/octeontx2: add CPT LF mbox for inline inbound crypto/octeontx2: create eth security ctx crypto/octeontx2: enable CPT to share QP with ethdev crypto/octeontx2: add eth security session operations Archana Muniganti (3): crypto/octeontx2: add lookup mem changes to hold sa indices drivers/octeontx2: add sec in compiler optimized RX fastpath framework drivers/octeontx2: add sec in compiler optimized TX fastpath framework Tejasree Kondoj (3): crypto/octeontx2: configure for inline IPsec crypto/octeontx2: add security in eth dev configure net/octeontx2: add inline ipsec rx path changes Vamsi Attunuru (2): common/octeontx2: add routine to check if rte_eth_dev belongs to otx2 crypto/octeontx2: sync inline tag type cfg with Rx adapter configuration doc/guides/nics/octeontx2.rst | 20 + drivers/common/octeontx2/otx2_common.c | 22 + drivers/common/octeontx2/otx2_common.h | 38 + drivers/common/octeontx2/otx2_mbox.h | 7 + .../octeontx2/rte_common_octeontx2_version.map | 3 + drivers/crypto/octeontx2/Makefile | 7 +- drivers/crypto/octeontx2/meson.build | 7 +- drivers/crypto/octeontx2/otx2_cryptodev.c | 8 + .../crypto/octeontx2/otx2_cryptodev_hw_access.h| 22 +- drivers/crypto/octeontx2/otx2_cryptodev_mbox.c | 53 ++ drivers/crypto/octeontx2/otx2_cryptodev_mbox.h | 7 + drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 56 ++ drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 35 + drivers/crypto/octeontx2/otx2_ipsec_fp.h | 348 drivers/crypto/octeontx2/otx2_security.c | 909 + drivers/crypto/octeontx2/otx2_security.h | 158 drivers/crypto/octeontx2/otx2_security_tx.h| 176 drivers/event/octeontx2/Makefile | 1 + drivers/event/octeontx2/meson.build| 5 +- drivers/event/octeontx2/otx2_evdev.c | 170 ++-- drivers/event/octeontx2/otx2_evdev.h | 4 +- drivers/event/octeontx2/otx2_worker.c | 6 +- drivers/event/octeontx2/otx2_worker.h | 6 + drivers/event/octeontx2/otx2_worker_dual.c | 6 +- drivers/net/octeontx2/Makefile | 1 + drivers/net/octeontx2/meson.build | 3 + drivers/net/octeontx2/otx2_ethdev.c| 46 +- drivers/net/octeontx2/otx2_ethdev.h| 2 + drivers/net/octeontx2/otx2_ethdev_devargs.c| 19 + drivers/net/octeontx2/otx2_flow.c | 26 + drivers/net/octeontx2/otx2_lookup.c| 9 +- drivers/net/octeontx2/otx2_rx.c| 27 +- drivers/net/octeontx2/otx2_rx.h| 386 ++--- drivers/net/octeontx2/otx2_tx.c| 29 +- drivers/net/octeontx2/otx2_tx.h| 271 -- 35 files changed, 2596 insertions(+), 297 deletions(-) create mode 100644 drivers/crypto/octeontx2/otx2_cryptodev_qp.h create mode 100644 drivers/crypto/octeontx2/otx2_ipsec_fp.h create mode 100644 drivers/crypto/octeontx2/otx2_security.c create mode 100644 drivers/crypto/octeontx2/otx2_security.h create mode 100644 drivers/crypto/octeontx2/otx2_security_tx.h -- 2.7.4
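As a rough application-side sketch of the mode described above (hedged: queue counts, ids and the event configuration are placeholders, and the usual queue setup and device start calls are omitted), Rx security offload is enabled on the ethdev and its queues are fed to the event device through the Rx adapter:

#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

static int
setup_inline_ipsec_rx(uint16_t port_id, uint8_t evdev_id, uint8_t adapter_id,
		      struct rte_event_port_conf *ev_port_conf)
{
	struct rte_eth_conf conf = {
		.rxmode = { .offloads = DEV_RX_OFFLOAD_SECURITY },
	};
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	ret = rte_event_eth_rx_adapter_create(adapter_id, evdev_id,
					      ev_port_conf);
	if (ret < 0)
		return ret;

	/* -1: subscribe all Rx queues of the port to the adapter. */
	return rte_event_eth_rx_adapter_queue_add(adapter_id, port_id, -1,
						  &qconf);
}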
[dpdk-dev] [PATCH 05/15] crypto/octeontx2: add security in eth dev configure
From: Tejasree Kondoj Adding security in eth device configure. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- doc/guides/nics/octeontx2.rst | 20 + drivers/common/octeontx2/otx2_common.h | 4 + drivers/crypto/octeontx2/Makefile | 2 +- drivers/crypto/octeontx2/meson.build| 1 + drivers/crypto/octeontx2/otx2_cryptodev.c | 2 + drivers/crypto/octeontx2/otx2_ipsec_fp.h| 55 + drivers/crypto/octeontx2/otx2_security.c| 122 drivers/crypto/octeontx2/otx2_security.h| 4 + drivers/net/octeontx2/otx2_ethdev.c | 22 - drivers/net/octeontx2/otx2_ethdev.h | 2 + drivers/net/octeontx2/otx2_ethdev_devargs.c | 19 + 11 files changed, 251 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/octeontx2/otx2_ipsec_fp.h diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst index db62a45..fd4e455 100644 --- a/doc/guides/nics/octeontx2.rst +++ b/doc/guides/nics/octeontx2.rst @@ -38,6 +38,7 @@ Features of the OCTEON TX2 Ethdev PMD are: - IEEE1588 timestamping - HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection - Support Rx interrupt +- Inline IPsec processing support Prerequisites - @@ -178,6 +179,17 @@ Runtime Config Options traffic on this port should be higig2 traffic only. Supported switch header types are "higig2" and "dsa". +- ``Max SPI for inbound inline IPsec`` (default ``1``) + + Max SPI supported for inbound inline IPsec processing can be specified by + ``ipsec_in_max_spi`` ``devargs`` parameter. + + For example:: + -w 0002:02:00.0,ipsec_in_max_spi=128 + + With the above configuration, application can enable inline IPsec processing + on 128 SAs (SPI 0-127). + .. note:: Above devarg parameters are configurable per device, user needs to pass the @@ -211,6 +223,14 @@ SDP interface support ~ OCTEON TX2 SDP interface support is limited to PF device, No VF support. +Inline Protocol Processing +~~ +``net_octeontx2`` pmd doesn't support the following features for packets to be +inline protocol processed. 
+- TSO offload +- VLAN/QinQ offload +- Fragmentation + Debugging Options - diff --git a/drivers/common/octeontx2/otx2_common.h b/drivers/common/octeontx2/otx2_common.h index a1cb434..9705a8d 100644 --- a/drivers/common/octeontx2/otx2_common.h +++ b/drivers/common/octeontx2/otx2_common.h @@ -79,10 +79,14 @@ int otx2_npa_lf_obj_ref(void); typedef int (*otx2_sec_eth_ctx_create_t)(struct rte_eth_dev *eth_dev); typedef void (*otx2_sec_eth_ctx_destroy_t)(struct rte_eth_dev *eth_dev); +typedef int (*otx2_sec_eth_init_t)(struct rte_eth_dev *eth_dev); +typedef void (*otx2_sec_eth_fini_t)(struct rte_eth_dev *eth_dev); struct otx2_sec_eth_crypto_idev_ops { otx2_sec_eth_ctx_create_t ctx_create; otx2_sec_eth_ctx_destroy_t ctx_destroy; + otx2_sec_eth_init_t init; + otx2_sec_eth_fini_t fini; }; extern struct otx2_sec_eth_crypto_idev_ops otx2_sec_idev_ops; diff --git a/drivers/crypto/octeontx2/Makefile b/drivers/crypto/octeontx2/Makefile index d2e9b9f..5966ddc 100644 --- a/drivers/crypto/octeontx2/Makefile +++ b/drivers/crypto/octeontx2/Makefile @@ -11,7 +11,7 @@ LIB = librte_pmd_octeontx2_crypto.a CFLAGS += $(WERROR_FLAGS) LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_cryptodev -lrte_security +LDLIBS += -lrte_cryptodev -lrte_security -lrte_eventdev LDLIBS += -lrte_pci -lrte_bus_pci LDLIBS += -lrte_common_cpt -lrte_common_octeontx2 diff --git a/drivers/crypto/octeontx2/meson.build b/drivers/crypto/octeontx2/meson.build index f7b2937..f0f5043 100644 --- a/drivers/crypto/octeontx2/meson.build +++ b/drivers/crypto/octeontx2/meson.build @@ -9,6 +9,7 @@ deps += ['bus_pci'] deps += ['common_cpt'] deps += ['common_octeontx2'] deps += ['ethdev'] +deps += ['eventdev'] deps += ['security'] name = 'octeontx2_crypto' diff --git a/drivers/crypto/octeontx2/otx2_cryptodev.c b/drivers/crypto/octeontx2/otx2_cryptodev.c index 86c1188..34feb82 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev.c @@ -158,4 +158,6 @@ RTE_INIT(otx2_cpt_init_log) otx2_sec_idev_ops.ctx_create = otx2_sec_eth_ctx_create; otx2_sec_idev_ops.ctx_destroy = otx2_sec_eth_ctx_destroy; + otx2_sec_idev_ops.init = otx2_sec_eth_init; + otx2_sec_idev_ops.fini = otx2_sec_eth_fini; } diff --git a/drivers/crypto/octeontx2/otx2_ipsec_fp.h b/drivers/crypto/octeontx2/otx2_ipsec_fp.h new file mode 100644 index 000..520a3fe --- /dev/null +++ b/drivers/crypto/octeontx2/otx2_ipsec_fp.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. + */ + +#ifn
[dpdk-dev] [PATCH 01/15] common/octeontx2: add CPT LF mbox for inline inbound
Adding the new mbox introduced to configure CPT LF to be used for inline inbound. Signed-off-by: Anoob Joseph Signed-off-by: Tejasree Kondoj --- drivers/common/octeontx2/otx2_mbox.h | 7 +++ 1 file changed, 7 insertions(+) diff --git a/drivers/common/octeontx2/otx2_mbox.h b/drivers/common/octeontx2/otx2_mbox.h index e0e4e2f..70452d1 100644 --- a/drivers/common/octeontx2/otx2_mbox.h +++ b/drivers/common/octeontx2/otx2_mbox.h @@ -193,6 +193,8 @@ M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp, \ msg_rsp) \ M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \ cpt_inline_ipsec_cfg_msg, msg_rsp) \ +M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg, \ + cpt_rx_inline_lf_cfg_msg, msg_rsp) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY,0x6000, npc_mcam_alloc_entry, \ npc_mcam_alloc_entry_req, \ @@ -1202,6 +1204,11 @@ struct cpt_inline_ipsec_cfg_msg { uint16_t __otx2_io nix_pf_func; /* Outbound path NIX_PF_FUNC */ }; +struct cpt_rx_inline_lf_cfg_msg { + struct mbox_msghdr hdr; + uint16_t __otx2_io sso_pf_func; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0x -- 2.7.4
[dpdk-dev] [PATCH 04/15] crypto/octeontx2: create eth security ctx
Adding security ctx to the eth device. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/common/octeontx2/otx2_common.c | 2 ++ drivers/common/octeontx2/otx2_common.h | 10 +++ .../octeontx2/rte_common_octeontx2_version.map | 2 ++ drivers/crypto/octeontx2/Makefile | 3 +- drivers/crypto/octeontx2/meson.build | 4 ++- drivers/crypto/octeontx2/otx2_cryptodev.c | 4 +++ drivers/crypto/octeontx2/otx2_security.c | 35 ++ drivers/crypto/octeontx2/otx2_security.h | 14 + drivers/net/octeontx2/otx2_ethdev.c| 18 ++- 9 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 drivers/crypto/octeontx2/otx2_security.c create mode 100644 drivers/crypto/octeontx2/otx2_security.h diff --git a/drivers/common/octeontx2/otx2_common.c b/drivers/common/octeontx2/otx2_common.c index 116db0f..764f6cd 100644 --- a/drivers/common/octeontx2/otx2_common.c +++ b/drivers/common/octeontx2/otx2_common.c @@ -11,6 +11,8 @@ #include "otx2_dev.h" #include "otx2_mbox.h" +struct otx2_sec_eth_crypto_idev_ops otx2_sec_idev_ops; + /** * @internal * Set default NPA configuration. diff --git a/drivers/common/octeontx2/otx2_common.h b/drivers/common/octeontx2/otx2_common.h index d32e59a..a1cb434 100644 --- a/drivers/common/octeontx2/otx2_common.h +++ b/drivers/common/octeontx2/otx2_common.h @@ -77,6 +77,16 @@ void otx2_npa_set_defaults(struct otx2_idev_cfg *idev); int otx2_npa_lf_active(void *dev); int otx2_npa_lf_obj_ref(void); +typedef int (*otx2_sec_eth_ctx_create_t)(struct rte_eth_dev *eth_dev); +typedef void (*otx2_sec_eth_ctx_destroy_t)(struct rte_eth_dev *eth_dev); + +struct otx2_sec_eth_crypto_idev_ops { + otx2_sec_eth_ctx_create_t ctx_create; + otx2_sec_eth_ctx_destroy_t ctx_destroy; +}; + +extern struct otx2_sec_eth_crypto_idev_ops otx2_sec_idev_ops; + /* Log */ extern int otx2_logtype_base; extern int otx2_logtype_mbox; diff --git a/drivers/common/octeontx2/rte_common_octeontx2_version.map b/drivers/common/octeontx2/rte_common_octeontx2_version.map index dac2283..d1dcb52 100644 --- a/drivers/common/octeontx2/rte_common_octeontx2_version.map +++ b/drivers/common/octeontx2/rte_common_octeontx2_version.map @@ -32,5 +32,7 @@ DPDK_20.0 { otx2_sso_pf_func_set; otx2_unregister_irq; + otx2_sec_idev_ops; + local: *; }; diff --git a/drivers/crypto/octeontx2/Makefile b/drivers/crypto/octeontx2/Makefile index 3ba67ed..d2e9b9f 100644 --- a/drivers/crypto/octeontx2/Makefile +++ b/drivers/crypto/octeontx2/Makefile @@ -11,7 +11,7 @@ LIB = librte_pmd_octeontx2_crypto.a CFLAGS += $(WERROR_FLAGS) LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_cryptodev -lrte_security LDLIBS += -lrte_pci -lrte_bus_pci LDLIBS += -lrte_common_cpt -lrte_common_octeontx2 @@ -38,6 +38,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_capabilities.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_hw_access.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_mbox.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_security.c # export include files SYMLINK-y-include += diff --git a/drivers/crypto/octeontx2/meson.build b/drivers/crypto/octeontx2/meson.build index 67deca3..f7b2937 100644 --- a/drivers/crypto/octeontx2/meson.build +++ b/drivers/crypto/octeontx2/meson.build @@ -9,6 +9,7 @@ deps += ['bus_pci'] deps += ['common_cpt'] deps += ['common_octeontx2'] 
deps += ['ethdev'] +deps += ['security'] name = 'octeontx2_crypto' allow_experimental_apis = true @@ -16,7 +17,8 @@ sources = files('otx2_cryptodev.c', 'otx2_cryptodev_capabilities.c', 'otx2_cryptodev_hw_access.c', 'otx2_cryptodev_mbox.c', - 'otx2_cryptodev_ops.c') + 'otx2_cryptodev_ops.c', + 'otx2_security.c') extra_flags = [] # This integrated controller runs only on a arm64 machine, remove 32bit warnings diff --git a/drivers/crypto/octeontx2/otx2_cryptodev.c b/drivers/crypto/octeontx2/otx2_cryptodev.c index 7fd216b..86c1188 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev.c @@ -17,6 +17,7 @@ #include "otx2_cryptodev_mbox.h" #include "otx2_cryptodev_ops.h" #include "otx2_dev.h" +#include "otx2_security.h" /* CPT common headers */ #include "cpt_common.h" @@ -154,4 +155,7 @@ RTE_INIT(otx2_cpt_init_log) otx2_cpt_logtype = rte_log_register("pmd.crypto.octeontx2"); if (otx2_cpt_logtype >= 0) rte_log_set_level(otx2_cpt_logtype, RTE_LOG_NOTICE); + + otx2_sec_idev_ops.ctx_create = otx2_sec
[dpdk-dev] [PATCH 08/15] crypto/octeontx2: add eth security session operations
Adding security session operations in eth security ctx. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/otx2_ipsec_fp.h | 293 ++ drivers/crypto/octeontx2/otx2_security.c | 340 +++ drivers/crypto/octeontx2/otx2_security.h | 23 ++- 3 files changed, 655 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/octeontx2/otx2_ipsec_fp.h b/drivers/crypto/octeontx2/otx2_ipsec_fp.h index 520a3fe..be83c23 100644 --- a/drivers/crypto/octeontx2/otx2_ipsec_fp.h +++ b/drivers/crypto/octeontx2/otx2_ipsec_fp.h @@ -5,6 +5,67 @@ #ifndef __OTX2_IPSEC_FP_H__ #define __OTX2_IPSEC_FP_H__ +#include +#include + +enum { + OTX2_IPSEC_FP_SA_DIRECTION_INBOUND = 0, + OTX2_IPSEC_FP_SA_DIRECTION_OUTBOUND = 1, +}; + +enum { + OTX2_IPSEC_FP_SA_IP_VERSION_4 = 0, + OTX2_IPSEC_FP_SA_IP_VERSION_6 = 1, +}; + +enum { + OTX2_IPSEC_FP_SA_MODE_TRANSPORT = 0, + OTX2_IPSEC_FP_SA_MODE_TUNNEL = 1, +}; + +enum { + OTX2_IPSEC_FP_SA_PROTOCOL_AH = 0, + OTX2_IPSEC_FP_SA_PROTOCOL_ESP = 1, +}; + +enum { + OTX2_IPSEC_FP_SA_AES_KEY_LEN_128 = 1, + OTX2_IPSEC_FP_SA_AES_KEY_LEN_192 = 2, + OTX2_IPSEC_FP_SA_AES_KEY_LEN_256 = 3, +}; + +enum { + OTX2_IPSEC_FP_SA_ENC_NULL = 0, + OTX2_IPSEC_FP_SA_ENC_DES_CBC = 1, + OTX2_IPSEC_FP_SA_ENC_3DES_CBC = 2, + OTX2_IPSEC_FP_SA_ENC_AES_CBC = 3, + OTX2_IPSEC_FP_SA_ENC_AES_CTR = 4, + OTX2_IPSEC_FP_SA_ENC_AES_GCM = 5, + OTX2_IPSEC_FP_SA_ENC_AES_CCM = 6, +}; + +enum { + OTX2_IPSEC_FP_SA_AUTH_NULL = 0, + OTX2_IPSEC_FP_SA_AUTH_MD5 = 1, + OTX2_IPSEC_FP_SA_AUTH_SHA1 = 2, + OTX2_IPSEC_FP_SA_AUTH_SHA2_224 = 3, + OTX2_IPSEC_FP_SA_AUTH_SHA2_256 = 4, + OTX2_IPSEC_FP_SA_AUTH_SHA2_384 = 5, + OTX2_IPSEC_FP_SA_AUTH_SHA2_512 = 6, + OTX2_IPSEC_FP_SA_AUTH_AES_GMAC = 7, + OTX2_IPSEC_FP_SA_AUTH_AES_XCBC_128 = 8, +}; + +enum { + OTX2_IPSEC_FP_SA_FRAG_POST = 0, + OTX2_IPSEC_FP_SA_FRAG_PRE = 1, +}; + +enum { + OTX2_IPSEC_FP_SA_ENCAP_NONE = 0, + OTX2_IPSEC_FP_SA_ENCAP_UDP = 1, +}; + struct otx2_ipsec_fp_sa_ctl { rte_be32_t spi : 32; uint64_t exp_proto_inter_frag : 8; @@ -24,6 +85,26 @@ struct otx2_ipsec_fp_sa_ctl { uint64_t aes_key_len : 2; }; +struct otx2_ipsec_fp_out_sa { + /* w0 */ + struct otx2_ipsec_fp_sa_ctl ctl; + + /* w1 */ + uint8_t nonce[4]; + uint16_t udp_src; + uint16_t udp_dst; + + /* w2 */ + uint32_t ip_src; + uint32_t ip_dst; + + /* w3-w6 */ + uint8_t cipher_key[32]; + + /* w7-w12 */ + uint8_t hmac_key[48]; +}; + struct otx2_ipsec_fp_in_sa { /* w0 */ struct otx2_ipsec_fp_sa_ctl ctl; @@ -52,4 +133,216 @@ struct otx2_ipsec_fp_in_sa { uint64_t reserved2; }; +static inline int +ipsec_fp_xform_cipher_verify(struct rte_crypto_sym_xform *xform) +{ + if (xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) { + switch (xform->cipher.key.length) { + case 16: + case 24: + case 32: + break; + default: + return -ENOTSUP; + } + return 0; + } + + return -ENOTSUP; +} + +static inline int +ipsec_fp_xform_auth_verify(struct rte_crypto_sym_xform *xform) +{ + if (xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) { + if (xform->auth.key.length == 64) + return 0; + } + + return -ENOTSUP; +} + +static inline int +ipsec_fp_xform_aead_verify(struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *xform) +{ + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS && + xform->aead.op != RTE_CRYPTO_AEAD_OP_ENCRYPT) + return -EINVAL; + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && + xform->aead.op != RTE_CRYPTO_AEAD_OP_DECRYPT) + return -EINVAL; + + if (xform->aead.algo == 
RTE_CRYPTO_AEAD_AES_GCM) { + switch (xform->aead.key.length) { + case 16: + case 24: + case 32: + break; + default: + return -EINVAL; + } + return 0; + } + + return -ENOTSUP; +} + +static inline int +ipsec_fp_xform_verify(struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *xform) +{ + struct rte_crypto_sym_xform *auth_xform, *cipher_xform; + int ret; + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + return ipsec_fp_xform_aead_verify(ipsec, xform); + + if (xform->next == NULL) + return -EINVAL; + + if (
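A hedged application-side sketch of the kind of session configuration the verification helpers above accept (AES-GCM-128, ESP tunnel mode, egress). The key, SPI and tunnel addresses are placeholders, not values from the patch.

#include <rte_crypto_sym.h>
#include <rte_security.h>

static struct rte_security_session *
create_outb_aes_gcm_session(struct rte_security_ctx *ctx,
			    struct rte_mempool *sess_mp)
{
	static uint8_t key[16]; /* placeholder 128-bit key */
	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = sizeof(key) },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
			.aad_length = 8,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 5, /* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				/* ipv4.src_ip/dst_ip would be filled here */
			},
		},
		.crypto_xform = &aead,
	};

	return rte_security_session_create(ctx, &conf, sess_mp);
}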
[dpdk-dev] [PATCH 03/15] crypto/octeontx2: configure for inline IPsec
From: Tejasree Kondoj For enabling outbound inline IPsec, a CPT queue needs to be tied to a NIX PF_FUNC. Distribute CPT queues fairly among all available otx2 eth ports. For inbound, one CPT LF will be assigned and initialized by the kernel. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/Makefile | 3 +- drivers/crypto/octeontx2/meson.build | 2 + drivers/crypto/octeontx2/otx2_cryptodev_mbox.c | 53 ++ drivers/crypto/octeontx2/otx2_cryptodev_mbox.h | 7 drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 38 ++ 5 files changed, 102 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/octeontx2/Makefile b/drivers/crypto/octeontx2/Makefile index f7d6c37..3ba67ed 100644 --- a/drivers/crypto/octeontx2/Makefile +++ b/drivers/crypto/octeontx2/Makefile @@ -10,7 +10,7 @@ LIB = librte_pmd_octeontx2_crypto.a # build flags CFLAGS += $(WERROR_FLAGS) -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_cryptodev LDLIBS += -lrte_pci -lrte_bus_pci LDLIBS += -lrte_common_cpt -lrte_common_octeontx2 @@ -21,6 +21,7 @@ CFLAGS += -O3 CFLAGS += -I$(RTE_SDK)/drivers/common/cpt CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2 CFLAGS += -DALLOW_EXPERIMENTAL_API ifneq ($(CONFIG_RTE_ARCH_64),y) diff --git a/drivers/crypto/octeontx2/meson.build b/drivers/crypto/octeontx2/meson.build index b6e5b73..67deca3 100644 --- a/drivers/crypto/octeontx2/meson.build +++ b/drivers/crypto/octeontx2/meson.build @@ -8,6 +8,7 @@ endif deps += ['bus_pci'] deps += ['common_cpt'] deps += ['common_octeontx2'] +deps += ['ethdev'] name = 'octeontx2_crypto' allow_experimental_apis = true @@ -32,3 +33,4 @@ endforeach includes += include_directories('../../common/cpt') includes += include_directories('../../common/octeontx2') includes += include_directories('../../mempool/octeontx2') +includes += include_directories('../../net/octeontx2') diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c index b54e407..3183210 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c @@ -2,10 +2,13 @@ * Copyright (C) 2019 Marvell International Ltd.
*/ #include +#include #include "otx2_cryptodev.h" +#include "otx2_cryptodev_hw_access.h" #include "otx2_cryptodev_mbox.h" #include "otx2_dev.h" +#include "otx2_ethdev.h" #include "otx2_mbox.h" #include "cpt_pmd_logs.h" @@ -173,3 +176,53 @@ otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg, return otx2_cpt_send_mbox_msg(vf); } + +int +otx2_cpt_inline_init(const struct rte_cryptodev *dev) +{ + struct otx2_cpt_vf *vf = dev->data->dev_private; + struct otx2_mbox *mbox = vf->otx2_dev.mbox; + struct cpt_rx_inline_lf_cfg_msg *msg; + int ret; + + msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox); + msg->sso_pf_func = otx2_sso_pf_func_get(); + + otx2_mbox_msg_send(mbox, 0); + ret = otx2_mbox_process(mbox); + if (ret < 0) + return -EIO; + + return 0; +} + +int +otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp, + uint16_t port_id) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; + struct otx2_cpt_vf *vf = dev->data->dev_private; + struct otx2_mbox *mbox = vf->otx2_dev.mbox; + struct cpt_inline_ipsec_cfg_msg *msg; + struct otx2_eth_dev *otx2_eth_dev; + int ret; + + if (!otx2_is_ethdev(&rte_eth_devices[port_id])) + return -EINVAL; + + otx2_eth_dev = otx2_eth_pmd_priv(eth_dev); + + msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox); + msg->dir = CPT_INLINE_OUTBOUND; + msg->enable = 1; + msg->slot = qp->id; + + msg->nix_pf_func = otx2_eth_dev->pf_func; + + otx2_mbox_msg_send(mbox, 0); + ret = otx2_mbox_process(mbox); + if (ret < 0) + return -EIO; + + return 0; +} diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h index a298718..ae66b08 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h +++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h @@ -7,6 +7,8 @@ #include +#include "otx2_cryptodev_hw_access.h" + int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev, uint16_t *nb_queues); @@ -22,4 +24,9 @@ int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg, int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg, uint64_t val); +i
[dpdk-dev] [PATCH 06/15] crypto/octeontx2: add eth security capabilities
From: Ankur Dwivedi Adding security capabilities supported by the eth PMD. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/otx2_security.c | 124 +++ drivers/crypto/octeontx2/otx2_security.h | 18 + 2 files changed, 142 insertions(+) diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c index 9cd4683..46b94e5 100644 --- a/drivers/crypto/octeontx2/otx2_security.c +++ b/drivers/crypto/octeontx2/otx2_security.c @@ -2,11 +2,13 @@ * Copyright (C) 2019 Marvell International Ltd. */ +#include #include #include #include #include #include +#include #include "otx2_ethdev.h" #include "otx2_ipsec_fp.h" @@ -27,12 +29,133 @@ struct sec_eth_tag_const { }; }; +static struct rte_cryptodev_capabilities otx2_sec_eth_crypto_caps[] = { + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 8, + .max = 12, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 12, + .increment = 0 + } + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 20, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 12, + .max = 12, + .increment = 0 + }, + }, } + }, } + }, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static const struct rte_security_capability otx2_sec_eth_capabilities[] = { + { /* IPsec Inline Protocol ESP Tunnel Ingress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ipsec = { + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .options = { 0 } + }, + .crypto_capabilities = otx2_sec_eth_crypto_caps, + .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA + }, + { /* IPsec Inline Protocol ESP Tunnel Egress */ + .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + .ip
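A small hedged sketch of how an application could check for the inline-protocol ESP tunnel capability advertised above before attempting session creation; the helper name is illustrative.

#include <rte_ethdev.h>
#include <rte_security.h>

static int
port_supports_inline_esp_tunnel(uint16_t port_id)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_capability_idx idx = {
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
		},
	};

	if (ctx == NULL)
		return 0; /* port has no security context */

	return rte_security_capability_get(ctx, &idx) != NULL;
}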
[dpdk-dev] [PATCH 07/15] crypto/octeontx2: enable CPT to share QP with ethdev
Adding the infrastructure to save one opaque pointer in idev and implement the consumer-producer in the PMDs which uses it accordingly. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- .../crypto/octeontx2/otx2_cryptodev_hw_access.h| 22 + drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 18 drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 35 drivers/crypto/octeontx2/otx2_security.c | 98 ++ drivers/crypto/octeontx2/otx2_security.h | 20 + 5 files changed, 172 insertions(+), 21 deletions(-) create mode 100644 drivers/crypto/octeontx2/otx2_cryptodev_qp.h diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h index 6f78aa4..43db6a6 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h +++ b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h @@ -15,6 +15,7 @@ #include "cpt_mcode_defines.h" #include "otx2_dev.h" +#include "otx2_cryptodev_qp.h" /* CPT instruction queue length */ #define OTX2_CPT_IQ_LEN8200 @@ -135,27 +136,6 @@ enum cpt_9x_comp_e { CPT_9X_COMP_E_LAST_ENTRY = 0x06 }; -struct otx2_cpt_qp { - uint32_t id; - /**< Queue pair id */ - uintptr_t base; - /**< Base address where BAR is mapped */ - void *lmtline; - /**< Address of LMTLINE */ - rte_iova_t lf_nq_reg; - /**< LF enqueue register address */ - struct pending_queue pend_q; - /**< Pending queue */ - struct rte_mempool *sess_mp; - /**< Session mempool */ - struct rte_mempool *sess_mp_priv; - /**< Session private data mempool */ - struct cpt_qp_meta_info meta_info; - /**< Metabuf info required to support operations on the queue pair */ - rte_iova_t iq_dma_addr; - /**< Instruction queue address */ -}; - void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev); int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev); diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c index a1213ca..702a653 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c @@ -13,6 +13,7 @@ #include "otx2_cryptodev_hw_access.h" #include "otx2_cryptodev_mbox.h" #include "otx2_cryptodev_ops.h" +#include "otx2_security.h" #include "otx2_mbox.h" #include "cpt_hw_types.h" @@ -146,6 +147,11 @@ otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp) if (ret) return ret; + /* Publish inline Tx QP to eth dev security */ + ret = otx2_sec_tx_cpt_qp_add(port_id, qp); + if (ret) + return ret; + return 0; } @@ -240,6 +246,12 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id, qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0); + ret = otx2_sec_tx_cpt_qp_remove(qp); + if (ret && (ret != -ENOENT)) { + CPT_LOG_ERR("Could not delete inline configuration"); + goto mempool_destroy; + } + otx2_cpt_iq_disable(qp); ret = otx2_cpt_qp_inline_cfg(dev, qp); @@ -273,6 +285,12 @@ otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp) char name[RTE_MEMZONE_NAMESIZE]; int ret; + ret = otx2_sec_tx_cpt_qp_remove(qp); + if (ret && (ret != -ENOENT)) { + CPT_LOG_ERR("Could not delete inline configuration"); + return ret; + } + otx2_cpt_iq_disable(qp); otx2_cpt_metabuf_mempool_destroy(qp); diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_qp.h b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h new file mode 100644 index 000..caf8272 --- /dev/null +++ b/drivers/crypto/octeontx2/otx2_cryptodev_qp.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause + * Copyright (C) 2019 Marvell International Ltd. + */ + +#ifndef _OTX2_CRYPTODEV_QP_H_ +#define _OTX2_CRYPTODEV_QP_H_ + +#include +#include +#include + +#include "cpt_common.h" + +struct otx2_cpt_qp { + uint32_t id; + /**< Queue pair id */ + uintptr_t base; + /**< Base address where BAR is mapped */ + void *lmtline; + /**< Address of LMTLINE */ + rte_iova_t lf_nq_reg; + /**< LF enqueue register address */ + struct pending_queue pend_q; + /**< Pending queue */ + struct rte_mempool *sess_mp; + /**< Session mempool */ + struct rte_mempool *sess_mp_priv; + /**< Session private data mempool */ + struct cpt_qp_meta_info meta_info; + /**< Metabuf info required to support operations on the queue pair */ + rte_iova_t iq_dma_addr; + /**< Instruction queue address */ +}; + +#endif /* _OTX2_CRYPTODEV_QP_H_ */ diff --git a/drivers/cr
[dpdk-dev] [PATCH 10/15] crypto/octeontx2: add lookup mem changes to hold sa indices
From: Archana Muniganti lookup_mem provides fast accessing of data path fields. Storing sa indices in lookup_mem which are required in inline rx data path. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/common/octeontx2/otx2_common.h | 20 +++ drivers/crypto/octeontx2/otx2_security.c | 59 +++- drivers/net/octeontx2/otx2_lookup.c | 9 + drivers/net/octeontx2/otx2_rx.h | 10 ++ 4 files changed, 81 insertions(+), 17 deletions(-) diff --git a/drivers/common/octeontx2/otx2_common.h b/drivers/common/octeontx2/otx2_common.h index 9705a8d..6456c4b 100644 --- a/drivers/common/octeontx2/otx2_common.h +++ b/drivers/common/octeontx2/otx2_common.h @@ -170,4 +170,24 @@ extern int otx2_logtype_dpi; #include "otx2_io_generic.h" #endif +/* Fastpath lookup */ +#define OTX2_NIX_FASTPATH_LOOKUP_MEM "otx2_nix_fastpath_lookup_mem" +#define PTYPE_NON_TUNNEL_WIDTH 16 +#define PTYPE_TUNNEL_WIDTH 12 +#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH) +#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_TUNNEL_WIDTH) +#define PTYPE_ARRAY_SZ ((PTYPE_NON_TUNNEL_ARRAY_SZ +\ +PTYPE_TUNNEL_ARRAY_SZ) *\ +sizeof(uint16_t)) + +/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */ +#define ERRCODE_ERRLEN_WIDTH 12 +#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\ + sizeof(uint32_t)) + +#define PORT_ARRAY_SZ (RTE_MAX_ETHPORTS * sizeof(uint64_t)) + +#define LOOKUP_ARRAY_SZ(PTYPE_ARRAY_SZ + ERR_ARRAY_SZ +\ + PORT_ARRAY_SZ) + #endif /* _OTX2_COMMON_H_ */ diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c index 545c806..4d762d9 100644 --- a/drivers/crypto/octeontx2/otx2_security.c +++ b/drivers/crypto/octeontx2/otx2_security.c @@ -136,6 +136,59 @@ static const struct rte_security_capability otx2_sec_eth_capabilities[] = { } }; +static void +lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) + return; + + mem = mz->addr; + + sa_tbl = (uint64_t **)(mem + PTYPE_ARRAY_SZ + ERR_ARRAY_SZ); + if (sa_tbl[port] == NULL) + return; + + rte_free(sa_tbl[port]); + sa_tbl[port] = NULL; +} + +static int +lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa) +{ + static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM; + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + uint16_t port = eth_dev->data->port_id; + const struct rte_memzone *mz; + uint64_t **sa_tbl; + uint8_t *mem; + + mz = rte_memzone_lookup(name); + if (mz == NULL) { + otx2_err("Could not find fastpath lookup table"); + return -EINVAL; + } + + mem = mz->addr; + + sa_tbl = (uint64_t **)(mem + PTYPE_ARRAY_SZ + ERR_ARRAY_SZ); + + if (sa_tbl[port] == NULL) { + sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi * + sizeof(uint64_t), 0); + } + + sa_tbl[port][spi] = (uint64_t)sa; + + return 0; +} + static int otx2_sec_eth_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp) { @@ -383,8 +436,10 @@ sec_eth_ipsec_in_sess_create(struct rte_eth_dev *eth_dev, sa->userdata = priv->userdata; - return ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); + if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa)) + return -EINVAL; + return ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl); } static int @@ -667,6 +722,8 @@ otx2_sec_eth_fini(struct rte_eth_dev *eth_dev) !(dev->rx_offloads & 
DEV_RX_OFFLOAD_SECURITY)) return; + lookup_mem_sa_tbl_clear(eth_dev); + in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port); rte_memzone_free(rte_memzone_lookup(name)); } diff --git a/drivers/net/octeontx2/otx2_lookup.c b/drivers/net/octeontx2/otx2_lookup.c index bcf2ff4..46fdbc8 100644 --- a/drivers/net/octeontx2/otx2_lookup.c +++ b/drivers/net/octeontx2/otx2_lookup.c @@ -7,13 +7,6 @@ #include "otx2_ethdev.h" -/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */ -#define ERRCODE_ERRLEN_WIDTH 12 -#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\ - sizeof(uint32_t)) - -#define LOOKUP_ARRAY_SZ(PTYPE_ARRA
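A hedged sketch of the consumer side: once lookup_mem_sa_index_update() has filled the per-port table, the Rx fast path can translate the SPI reported by hardware into the inbound SA pointer with two dependent loads. The helper below is illustrative, not the driver's code.

#include <stddef.h>
#include <stdint.h>

static inline void *
inline_rx_sa_from_spi(uint64_t * const *sa_tbl, uint16_t port, uint32_t spi)
{
	if (sa_tbl[port] == NULL)
		return NULL; /* port has no inline inbound SAs installed */

	return (void *)(uintptr_t)sa_tbl[port][spi];
}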
[dpdk-dev] [PATCH 11/15] net/octeontx2: add inline ipsec rx path changes
From: Tejasree Kondoj Adding post-processing required for inline IPsec inbound packets. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/Makefile| 1 + drivers/crypto/octeontx2/otx2_security.h | 19 + drivers/event/octeontx2/Makefile | 1 + drivers/event/octeontx2/meson.build | 2 + drivers/net/octeontx2/Makefile | 1 + drivers/net/octeontx2/meson.build| 3 ++ drivers/net/octeontx2/otx2_rx.h | 72 7 files changed, 99 insertions(+) diff --git a/drivers/crypto/octeontx2/Makefile b/drivers/crypto/octeontx2/Makefile index 5966ddc..62b630e 100644 --- a/drivers/crypto/octeontx2/Makefile +++ b/drivers/crypto/octeontx2/Makefile @@ -20,6 +20,7 @@ VPATH += $(RTE_SDK)/drivers/crypto/octeontx2 CFLAGS += -O3 CFLAGS += -I$(RTE_SDK)/drivers/common/cpt CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2 CFLAGS += -DALLOW_EXPERIMENTAL_API diff --git a/drivers/crypto/octeontx2/otx2_security.h b/drivers/crypto/octeontx2/otx2_security.h index adca00b..1229685 100644 --- a/drivers/crypto/octeontx2/otx2_security.h +++ b/drivers/crypto/octeontx2/otx2_security.h @@ -26,6 +26,25 @@ struct otx2_sec_eth_cfg { rte_spinlock_t tx_cpt_lock; }; +#define OTX2_SEC_CPT_COMP_GOOD 0x1 +#define OTX2_SEC_UC_COMP_GOOD 0x0 +#define OTX2_SEC_COMP_GOOD (OTX2_SEC_UC_COMP_GOOD << 8 | \ +OTX2_SEC_CPT_COMP_GOOD) + +/* CPT Result */ +struct otx2_cpt_res { + union { + struct { + uint64_t compcode:8; + uint64_t uc_compcode:8; + uint64_t doneint:1; + uint64_t reserved_17_63:47; + uint64_t reserved_64_127; + }; + uint16_t u16[8]; + }; +}; + /* * Security session for inline IPsec protocol offload. This is private data of * inline capable PMD. 
diff --git a/drivers/event/octeontx2/Makefile b/drivers/event/octeontx2/Makefile index 6dab69c..bcd22ee 100644 --- a/drivers/event/octeontx2/Makefile +++ b/drivers/event/octeontx2/Makefile @@ -11,6 +11,7 @@ LIB = librte_pmd_octeontx2_event.a CFLAGS += $(WERROR_FLAGS) CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/event/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2 diff --git a/drivers/event/octeontx2/meson.build b/drivers/event/octeontx2/meson.build index 807818b..56febb8 100644 --- a/drivers/event/octeontx2/meson.build +++ b/drivers/event/octeontx2/meson.build @@ -32,3 +32,5 @@ foreach flag: extra_flags endforeach deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2'] + +includes += include_directories('../../crypto/octeontx2') diff --git a/drivers/net/octeontx2/Makefile b/drivers/net/octeontx2/Makefile index 68f5765..d31ce0a 100644 --- a/drivers/net/octeontx2/Makefile +++ b/drivers/net/octeontx2/Makefile @@ -11,6 +11,7 @@ LIB = librte_pmd_octeontx2.a CFLAGS += $(WERROR_FLAGS) CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2 +CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2 CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2 CFLAGS += -O3 diff --git a/drivers/net/octeontx2/meson.build b/drivers/net/octeontx2/meson.build index fad3076..4a06eb2 100644 --- a/drivers/net/octeontx2/meson.build +++ b/drivers/net/octeontx2/meson.build @@ -25,6 +25,7 @@ sources = files('otx2_rx.c', ) deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2'] +deps += ['cryptodev', 'security'] cflags += ['-flax-vector-conversions'] @@ -39,3 +40,5 @@ foreach flag: extra_flags cflags += flag endif endforeach + +includes += include_directories('../../crypto/octeontx2') diff --git a/drivers/net/octeontx2/otx2_rx.h b/drivers/net/octeontx2/otx2_rx.h index 5e1d5a2..f1dbfb7 100644 --- a/drivers/net/octeontx2/otx2_rx.h +++ b/drivers/net/octeontx2/otx2_rx.h @@ -5,7 +5,11 @@ #ifndef __OTX2_RX_H__ #define __OTX2_RX_H__ +#include + #include "otx2_common.h" +#include "otx2_ipsec_fp.h" +#include "otx2_security.h" /* Default mark value used when none is provided. */ #define OTX2_FLOW_ACTION_FLAG_DEFAULT 0x @@ -25,6 +29,12 @@ #define NIX_RX_MULTI_SEG_FBIT(15) #define NIX_TIMESYNC_RX_OFFSET 8 +/* Inline IPsec offsets */ + +#define INLINE_INB_RPTR_HDR16 +/* nix_cqe_hdr_s + nix_rx_parse_s + nix_rx_sg_s + nix_iova_s */ +#define INLINE_CPT_RESULT_OFFSET 80 + struct otx2_timesync_info { uint64_trx_tstamp; rte_iova_t tx_tstamp_
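A hedged sketch of the check this post-processing performs: the hardware leaves a CPT result word inside the packet buffer (at INLINE_CPT_RESULT_OFFSET) and both completion codes must be good before the packet is handed to the application. The helper below is illustrative only and assumes a little-endian host.

#include <stdint.h>

#define SKETCH_SEC_COMP_GOOD 0x0001 /* compcode 0x1 in bits 7:0, uc code 0x0 in bits 15:8 */

static inline int
inline_rx_result_good(const void *cpt_res)
{
	/* First 16 bits of the result hold compcode | uc_compcode << 8. */
	const uint16_t *res = cpt_res;

	return res[0] == SKETCH_SEC_COMP_GOOD;
}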
[dpdk-dev] [PATCH 13/15] drivers/octeontx2: add sec in compiler optimized TX fastpath framework
From: Archana Muniganti Added new flag for SECURITY in compiler optimized TX fastpath framework. With this, compiler autogenerates functions which have security enabled. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/event/octeontx2/otx2_evdev.c | 36 ++-- drivers/event/octeontx2/otx2_evdev.h | 2 +- drivers/event/octeontx2/otx2_worker.c | 4 +- drivers/event/octeontx2/otx2_worker_dual.c | 4 +- drivers/net/octeontx2/otx2_ethdev.c| 3 + drivers/net/octeontx2/otx2_tx.c| 29 +-- drivers/net/octeontx2/otx2_tx.h| 271 ++--- 7 files changed, 250 insertions(+), 99 deletions(-) diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c index f6c641a..d20213d 100644 --- a/drivers/event/octeontx2/otx2_evdev.c +++ b/drivers/event/octeontx2/otx2_evdev.c @@ -177,35 +177,37 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC }; /* Tx modes */ - const event_tx_adapter_enqueue ssogws_tx_adptr_enq[2][2][2][2][2][2] = { -#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_tx_adptr_enq_ ## name, + const event_tx_adapter_enqueue + ssogws_tx_adptr_enq[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_tx_adptr_enq_ ## name, SSO_TX_ADPTR_ENQ_FASTPATH_FUNC #undef T }; const event_tx_adapter_enqueue - ssogws_tx_adptr_enq_seg[2][2][2][2][2][2] = { -#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \ - [f5][f4][f3][f2][f1][f0] = \ + ssogws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ otx2_ssogws_tx_adptr_enq_seg_ ## name, SSO_TX_ADPTR_ENQ_FASTPATH_FUNC #undef T }; const event_tx_adapter_enqueue - ssogws_dual_tx_adptr_enq[2][2][2][2][2][2] = { -#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \ - [f5][f4][f3][f2][f1][f0] = \ + ssogws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ otx2_ssogws_dual_tx_adptr_enq_ ## name, SSO_TX_ADPTR_ENQ_FASTPATH_FUNC #undef T }; const event_tx_adapter_enqueue - ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = { -#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \ - [f5][f4][f3][f2][f1][f0] = \ + ssogws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = { +#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ otx2_ssogws_dual_tx_adptr_enq_seg_ ## name, SSO_TX_ADPTR_ENQ_FASTPATH_FUNC #undef T @@ -290,8 +292,9 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC } if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) { - /* [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ + /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ event_dev->txa_enqueue = ssogws_tx_adptr_enq_seg + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)] @@ -300,6 +303,7 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)]; } else { event_dev->txa_enqueue = ssogws_tx_adptr_enq + [!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)] [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)] @@ -440,8 +444,10 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC } if (dev->tx_offloads & NIX_TX_MULTI_SEG_F) { - /* [TSMP] [MBUF_NOFF] 
[VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ + /* [SEC] [TSMP] [MBUF_NOFF] [VLAN] [OL3_L4_CSUM] [L3_L4_CSUM] */ event_dev->txa_enqueue = ssogws_dual_tx_adptr_enq_seg + [!!(dev->tx_offloads & +
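For readers unfamiliar with this framework, a generic illustration (not the driver code) of the dispatch technique the patch extends: every offload becomes one boolean index into a function-pointer table, so the compiler emits one specialised burst function per flag combination and the right one is picked once at configure time.

#include <stdint.h>

typedef uint16_t (*tx_burst_fn)(void *txq, void **pkts, uint16_t nb_pkts);

#define OFF_CSUM (1ULL << 0)
#define OFF_SEC  (1ULL << 1)

static uint16_t tx_plain(void *q, void **p, uint16_t n)    { (void)q; (void)p; return n; }
static uint16_t tx_csum(void *q, void **p, uint16_t n)     { (void)q; (void)p; return n; }
static uint16_t tx_sec(void *q, void **p, uint16_t n)      { (void)q; (void)p; return n; }
static uint16_t tx_sec_csum(void *q, void **p, uint16_t n) { (void)q; (void)p; return n; }

/* [SEC][CSUM]: the patch grows a similar table from six to seven dimensions. */
static const tx_burst_fn tx_burst_table[2][2] = {
	[0][0] = tx_plain, [0][1] = tx_csum,
	[1][0] = tx_sec,   [1][1] = tx_sec_csum,
};

static inline tx_burst_fn
pick_tx_burst(uint64_t offloads)
{
	return tx_burst_table[!!(offloads & OFF_SEC)][!!(offloads & OFF_CSUM)];
}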
[dpdk-dev] [PATCH 09/15] crypto/octeontx2: add datapath ops in eth security ctx
From: Ankur Dwivedi Adding data path ops in eth security ctx. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/otx2_security.c | 23 +++ 1 file changed, 23 insertions(+) diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c index 393bc12..545c806 100644 --- a/drivers/crypto/octeontx2/otx2_security.c +++ b/drivers/crypto/octeontx2/otx2_security.c @@ -486,6 +486,27 @@ otx2_sec_eth_session_get_size(void *device __rte_unused) return sizeof(struct otx2_sec_session); } +static int +otx2_sec_eth_set_pkt_mdata(void *device __rte_unused, + struct rte_security_session *session, + struct rte_mbuf *m, void *params __rte_unused) +{ + /* Set security session as the pkt metadata */ + m->udata64 = (uint64_t)session; + + return 0; +} + +static int +otx2_sec_eth_get_userdata(void *device __rte_unused, uint64_t md, + void **userdata) +{ + /* Retrieve userdata */ + *userdata = (void *)md; + + return 0; +} + static const struct rte_security_capability * otx2_sec_eth_capabilities_get(void *device __rte_unused) { @@ -496,6 +517,8 @@ static struct rte_security_ops otx2_sec_eth_ops = { .session_create = otx2_sec_eth_session_create, .session_destroy= otx2_sec_eth_session_destroy, .session_get_size = otx2_sec_eth_session_get_size, + .set_pkt_metadata = otx2_sec_eth_set_pkt_mdata, + .get_userdata = otx2_sec_eth_get_userdata, .capabilities_get = otx2_sec_eth_capabilities_get }; -- 2.7.4
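A hedged usage sketch of the application side of these two ops: before transmitting an inline-protocol packet, the session is attached to the mbuf so the PMD can recover it in the Tx path. The helper name is illustrative.

#include <rte_mbuf.h>
#include <rte_security.h>

static inline int
prepare_inline_tx_pkt(struct rte_security_ctx *ctx,
		      struct rte_security_session *sess, struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Stored in m->udata64 by otx2_sec_eth_set_pkt_mdata() above. */
	return rte_security_set_pkt_metadata(ctx, sess, m, NULL);
}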
[dpdk-dev] [PATCH 14/15] crypto/octeontx2: add inline tx path changes
From: Ankur Dwivedi Adding pre-processing required for inline IPsec outbound packets. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/crypto/octeontx2/otx2_security.c| 82 + drivers/crypto/octeontx2/otx2_security.h| 60 ++ drivers/crypto/octeontx2/otx2_security_tx.h | 176 drivers/event/octeontx2/meson.build | 3 +- drivers/event/octeontx2/otx2_worker.h | 6 + 5 files changed, 326 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/octeontx2/otx2_security_tx.h diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c index 4d762d9..7bd5625 100644 --- a/drivers/crypto/octeontx2/otx2_security.c +++ b/drivers/crypto/octeontx2/otx2_security.c @@ -3,12 +3,15 @@ */ #include +#include #include #include +#include #include #include #include #include +#include #include "otx2_cryptodev_qp.h" #include "otx2_ethdev.h" @@ -17,6 +20,15 @@ #define SEC_ETH_MAX_PKT_LEN1450 +#define AH_HDR_LEN 12 +#define AES_GCM_IV_LEN 8 +#define AES_GCM_MAC_LEN16 +#define AES_CBC_IV_LEN 16 +#define SHA1_HMAC_LEN 12 + +#define AES_GCM_ROUNDUP_BYTE_LEN 4 +#define AES_CBC_ROUNDUP_BYTE_LEN 16 + struct sec_eth_tag_const { RTE_STD_C11 union { @@ -278,6 +290,60 @@ in_sa_get(uint16_t port, int sa_index) } static int +ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec, + struct rte_crypto_sym_xform *xform, + struct otx2_sec_session_ipsec_ip *sess) +{ + struct rte_crypto_sym_xform *cipher_xform, *auth_xform; + + sess->partial_len = sizeof(struct rte_ipv4_hdr); + + if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) { + sess->partial_len += sizeof(struct rte_esp_hdr); + sess->roundup_len = sizeof(struct rte_esp_tail); + } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) { + sess->partial_len += AH_HDR_LEN; + } else { + return -EINVAL; + } + + if (ipsec->options.udp_encap) + sess->partial_len += sizeof(struct rte_udp_hdr); + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { + if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) { + sess->partial_len += AES_GCM_IV_LEN; + sess->partial_len += AES_GCM_MAC_LEN; + sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN; + } + return 0; + } + + if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { + cipher_xform = xform; + auth_xform = xform->next; + } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { + auth_xform = xform; + cipher_xform = xform->next; + } else { + return -EINVAL; + } + if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) { + sess->partial_len += AES_CBC_IV_LEN; + sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN; + } else { + return -EINVAL; + } + + if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) + sess->partial_len += SHA1_HMAC_LEN; + else + return -EINVAL; + + return 0; +} + +static int sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, struct rte_security_ipsec_xform *ipsec, struct rte_crypto_sym_xform *crypto_xform, @@ -291,6 +357,7 @@ sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, struct otx2_ipsec_fp_sa_ctl *ctl; struct otx2_ipsec_fp_out_sa *sa; struct otx2_sec_session *priv; + struct otx2_cpt_inst_s inst; struct otx2_cpt_qp *qp; priv = get_sec_session_private_data(sec_sess); @@ -305,6 +372,12 @@ sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip)); + sess->seq = 1; + + ret = ipsec_sa_const_set(ipsec, crypto_xform, sess); + if (ret < 0) + return ret; + memcpy(sa->nonce, &ipsec->salt, 4); if 
(ipsec->options.udp_encap == 1) { @@ -313,6 +386,9 @@ sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, } if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { + /* Start ip id from 1 */ + sess->ip_id = 1; + if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip, sizeof(struct in_addr)); @@ -346,6 +422,12 @@ sec_eth_ipsec_out_sess_create(struct rte_eth_dev *eth_dev, else return -EINVAL; + /* Determine word 7 of CPT instr
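The constants filled in by ipsec_sa_const_set() let the outbound path size a packet before it reaches the inline engine: partial_len covers the fixed overhead (outer IP, ESP/AH header, IV, ICV), roundup_byte is the cipher block alignment and roundup_len the ESP trailer. The short sketch below shows how such constants translate into an expected wire length; it is plain arithmetic under those assumptions, not code from this patch, and the helper name is made up.

#include <stdint.h>

/* Sketch: expected length of an ESP-encapsulated packet, assuming
 * partial_len = outer IP + ESP header + IV + ICV, roundup_byte = cipher
 * block alignment, roundup_len = size of the ESP trailer. */
static inline uint32_t
esp_outb_pkt_len(uint32_t payload_len, uint16_t partial_len,
                 uint8_t roundup_byte, uint8_t roundup_len)
{
        uint32_t enc_len;

        /* Payload plus trailer, padded up to the cipher block alignment */
        enc_len = payload_len + roundup_len;
        enc_len = ((enc_len + roundup_byte - 1) / roundup_byte) * roundup_byte;

        /* Fixed headers, IV and ICV on top of the encrypted portion */
        return partial_len + enc_len;
}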
[dpdk-dev] [PATCH 12/15] drivers/octeontx2: add sec in compiler optimized RX fastpath framework
From: Archana Muniganti Added new flag for SECURITY in RX compiler optimized fastpath framework. With this, compiler autogenerates functions which have security enabled. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/event/octeontx2/otx2_evdev.c | 134 - drivers/event/octeontx2/otx2_evdev.h | 2 +- drivers/event/octeontx2/otx2_worker.c | 2 +- drivers/event/octeontx2/otx2_worker_dual.c | 2 +- drivers/net/octeontx2/otx2_ethdev.c| 3 + drivers/net/octeontx2/otx2_rx.c| 27 +-- drivers/net/octeontx2/otx2_rx.h| 306 - 7 files changed, 320 insertions(+), 156 deletions(-) diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c index 2daeba4..f6c641a 100644 --- a/drivers/event/octeontx2/otx2_evdev.c +++ b/drivers/event/octeontx2/otx2_evdev.c @@ -44,61 +44,64 @@ sso_fastpath_fns_set(struct rte_eventdev *event_dev) { struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev); /* Single WS modes */ - const event_dequeue_t ssogws_deq[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name, + const event_dequeue_t ssogws_deq[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; - const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name, + const event_dequeue_burst_t ssogws_deq_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_burst_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; - const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name, + const event_dequeue_t ssogws_deq_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_timeout_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; const event_dequeue_burst_t - ssogws_deq_timeout_burst[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = \ + ssogws_deq_timeout_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ otx2_ssogws_deq_timeout_burst_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; - const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name, + const event_dequeue_t ssogws_deq_seg[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; - const event_dequeue_burst_t ssogws_deq_seg_burst[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_burst_ ##name, + const event_dequeue_burst_t + ssogws_deq_seg_burst[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_seg_burst_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; - const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2] = { -#define R(name, f5, f4, f3, f2, f1, f0, 
flags) \ - [f5][f4][f3][f2][f1][f0] = otx2_ssogws_deq_seg_timeout_ ##name, + const event_dequeue_t ssogws_deq_seg_timeout[2][2][2][2][2][2][2] = { +#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \ + [f6][f5][f4][f3][f2][f1][f0] = \ + otx2_ssogws_deq_seg_timeout_ ##name, SSO_RX_ADPTR_ENQ_FASTPATH_FUNC #undef R }; const event_dequeue_burst_t - ssogws_deq_seg_timeout_burst[2][2][2][2][2][2] = { -#define R(n
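The change above only widens an existing pattern: every Rx offload flag is one dimension of a function-pointer table, and the R() macro expands a single list of flag combinations into both the specialized handlers and the table initializers. A stripped-down, self-contained illustration of that pattern with two made-up flags (not the otx2 flag set) follows.

#include <stdint.h>
#include <stdio.h>

/* Two example feature bits; the otx2 driver has seven (F6..F0). */
#define FLAG_CSUM (1 << 0)
#define FLAG_SEC  (1 << 1)

/* One entry per flag combination: name, [sec][csum] index, flags. */
#define RX_HANDLERS                                  \
        R(none,     0, 0, 0)                         \
        R(csum,     0, 1, FLAG_CSUM)                 \
        R(sec,      1, 0, FLAG_SEC)                  \
        R(sec_csum, 1, 1, FLAG_SEC | FLAG_CSUM)

/* Generate one specialized "receive" function per combination. */
#define R(name, f1, f0, flags)                       \
        static uint16_t recv_##name(void) { return (flags); }
RX_HANDLERS
#undef R

int
main(void)
{
        /* Lookup table indexed by the individual feature bits. */
        static uint16_t (*const recv_fns[2][2])(void) = {
#define R(name, f1, f0, flags) [f1][f0] = recv_##name,
        RX_HANDLERS
#undef R
        };
        int use_sec = 1, use_csum = 0;

        printf("selected handler returns flags 0x%x\n",
               (unsigned int)recv_fns[use_sec][use_csum]());
        return 0;
}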
[dpdk-dev] [PATCH 15/15] crypto/octeontx2: sync inline tag type cfg with Rx adapter configuration
From: Vamsi Attunuru Tag type configuration for the inline processed packets is set during ethdev configuration, it might conflict with tag type configuration done during Rx adapter configuration which would be setup later. This conflict is fixed as part of flow rule creation by updating tag type config of inline same as Rx adapter configured tag type. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Archana Muniganti Signed-off-by: Tejasree Kondoj Signed-off-by: Vamsi Attunuru --- drivers/common/octeontx2/otx2_common.h| 2 ++ drivers/crypto/octeontx2/otx2_cryptodev.c | 2 ++ drivers/crypto/octeontx2/otx2_security.c | 28 drivers/crypto/octeontx2/otx2_security.h | 2 ++ drivers/net/octeontx2/otx2_flow.c | 26 ++ 5 files changed, 60 insertions(+) diff --git a/drivers/common/octeontx2/otx2_common.h b/drivers/common/octeontx2/otx2_common.h index 6456c4b..3e53e5b 100644 --- a/drivers/common/octeontx2/otx2_common.h +++ b/drivers/common/octeontx2/otx2_common.h @@ -81,12 +81,14 @@ typedef int (*otx2_sec_eth_ctx_create_t)(struct rte_eth_dev *eth_dev); typedef void (*otx2_sec_eth_ctx_destroy_t)(struct rte_eth_dev *eth_dev); typedef int (*otx2_sec_eth_init_t)(struct rte_eth_dev *eth_dev); typedef void (*otx2_sec_eth_fini_t)(struct rte_eth_dev *eth_dev); +typedef int (*otx2_sec_eth_update_tag_type_t)(struct rte_eth_dev *eth_dev); struct otx2_sec_eth_crypto_idev_ops { otx2_sec_eth_ctx_create_t ctx_create; otx2_sec_eth_ctx_destroy_t ctx_destroy; otx2_sec_eth_init_t init; otx2_sec_eth_fini_t fini; + otx2_sec_eth_update_tag_type_t update_tag_type; }; extern struct otx2_sec_eth_crypto_idev_ops otx2_sec_idev_ops; diff --git a/drivers/crypto/octeontx2/otx2_cryptodev.c b/drivers/crypto/octeontx2/otx2_cryptodev.c index 34feb82..b944a51 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev.c @@ -160,4 +160,6 @@ RTE_INIT(otx2_cpt_init_log) otx2_sec_idev_ops.ctx_destroy = otx2_sec_eth_ctx_destroy; otx2_sec_idev_ops.init = otx2_sec_eth_init; otx2_sec_idev_ops.fini = otx2_sec_eth_fini; + otx2_sec_idev_ops.update_tag_type = otx2_sec_eth_update_tag_type; + } diff --git a/drivers/crypto/octeontx2/otx2_security.c b/drivers/crypto/octeontx2/otx2_security.c index 7bd5625..25fdc4e 100644 --- a/drivers/crypto/octeontx2/otx2_security.c +++ b/drivers/crypto/octeontx2/otx2_security.c @@ -749,6 +749,34 @@ sec_eth_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt) } int +otx2_sec_eth_update_tag_type(struct rte_eth_dev *eth_dev) +{ + struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev); + struct otx2_mbox *mbox = dev->mbox; + struct nix_aq_enq_rsp *rsp; + struct nix_aq_enq_req *aq; + int ret; + + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); + aq->qidx = 0; /* Read RQ:0 context */ + aq->ctype = NIX_AQ_CTYPE_RQ; + aq->op = NIX_AQ_INSTOP_READ; + + ret = otx2_mbox_process_msg(mbox, (void *)&rsp); + if (ret < 0) { + otx2_err("Could not read RQ context"); + return ret; + } + + /* Update tag type */ + ret = sec_eth_ipsec_cfg(eth_dev, rsp->rq.sso_tt); + if (ret < 0) + otx2_err("Could not update sec eth tag type"); + + return ret; +} + +int otx2_sec_eth_init(struct rte_eth_dev *eth_dev) { const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa); diff --git a/drivers/crypto/octeontx2/otx2_security.h b/drivers/crypto/octeontx2/otx2_security.h index e576f67..3ae6dc6 100644 --- a/drivers/crypto/octeontx2/otx2_security.h +++ b/drivers/crypto/octeontx2/otx2_security.h @@ -146,6 +146,8 @@ int otx2_sec_eth_ctx_create(struct rte_eth_dev *eth_dev); void 
otx2_sec_eth_ctx_destroy(struct rte_eth_dev *eth_dev); +int otx2_sec_eth_update_tag_type(struct rte_eth_dev *eth_dev); + int otx2_sec_eth_init(struct rte_eth_dev *eth_dev); void otx2_sec_eth_fini(struct rte_eth_dev *eth_dev); diff --git a/drivers/net/octeontx2/otx2_flow.c b/drivers/net/octeontx2/otx2_flow.c index f1fb9f9..dea5337 100644 --- a/drivers/net/octeontx2/otx2_flow.c +++ b/drivers/net/octeontx2/otx2_flow.c @@ -299,6 +299,22 @@ flow_free_rss_action(struct rte_eth_dev *eth_dev, return 0; } +static int +flow_update_sec_tt(struct rte_eth_dev *eth_dev, + const struct rte_flow_action actions[]) +{ + int rc = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) { + if (otx2_sec_idev_ops.update_tag_type != NULL) + rc = otx2_sec_idev_ops.update_tag_type(eth_dev); + break; + } + } + + return rc; +} static int flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst) @@ -491,6 +507,16 @@ otx2_f
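The fix relies on an ops-table indirection shared by the two drivers: the crypto/security driver installs the callback at init time, and the net driver's flow-creation path invokes it only when the rule carries a SECURITY action, so the inline tag type is re-read after the Rx adapter has programmed the RQ. A condensed, standalone sketch of that pattern is below; the names are illustrative, not the otx2 symbols.

#include <stddef.h>
#include <stdio.h>

/* Common layer owns a table of optional callbacks. */
struct sec_idev_ops {
        int (*update_tag_type)(int port_id);
};

static struct sec_idev_ops sec_idev_ops;

/* Security/crypto driver side: provide the implementation. */
static int
sec_update_tag_type(int port_id)
{
        printf("re-sync inline tag type for port %d\n", port_id);
        return 0;
}

static void
crypto_driver_init(void)
{
        sec_idev_ops.update_tag_type = sec_update_tag_type;
}

/* Ethdev/flow side: call the hook only if it was registered and the
 * flow being created carries a SECURITY action. */
static int
flow_update_sec_tt(int port_id, int has_security_action)
{
        if (has_security_action && sec_idev_ops.update_tag_type != NULL)
                return sec_idev_ops.update_tag_type(port_id);
        return 0;
}

int
main(void)
{
        crypto_driver_init();
        return flow_update_sec_tt(0, 1);
}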
[dpdk-dev] [PATCH 04/14] examples/ipsec-secgw: add Rx adapter support
Add Rx adapter support. The event helper init routine will initialize the Rx adapter according to the configuration. If Rx adapter config is not present it will generate a default config. It will check the available eth ports and event queues and map them 1:1. So one eth port will be connected to one event queue. This way event queue ID could be used to figure out the port on which a packet came in. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 289 +++- examples/ipsec-secgw/event_helper.h | 29 2 files changed, 317 insertions(+), 1 deletion(-) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index d0157f4..f0eca01 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -4,10 +4,60 @@ #include #include #include +#include #include #include "event_helper.h" +static int +eh_get_enabled_cores(struct rte_bitmap *eth_core_mask) +{ + int i; + int count = 0; + + RTE_LCORE_FOREACH(i) { + /* Check if this core is enabled in core mask*/ + if (rte_bitmap_get(eth_core_mask, i)) { + /* We have found enabled core */ + count++; + } + } + return count; +} + +static inline unsigned int +eh_get_next_eth_core(struct eventmode_conf *em_conf) +{ + static unsigned int prev_core = -1; + unsigned int next_core; + + /* +* Make sure we have at least one eth core running, else the following +* logic would lead to an infinite loop. +*/ + if (eh_get_enabled_cores(em_conf->eth_core_mask) == 0) { + EH_LOG_ERR("No enabled eth core found"); + return RTE_MAX_LCORE; + } + +get_next_core: + /* Get the next core */ + next_core = rte_get_next_lcore(prev_core, 0, 1); + + /* Check if we have reached max lcores */ + if (next_core == RTE_MAX_LCORE) + return next_core; + + /* Update prev_core */ + prev_core = next_core; + + /* Only some cores are marked as eth cores. Skip others */ + if (!(rte_bitmap_get(em_conf->eth_core_mask, next_core))) + goto get_next_core; + + return next_core; +} + static inline unsigned int eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core) { @@ -154,6 +204,87 @@ eh_set_default_conf_link(struct eventmode_conf *em_conf) } static int +eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf) +{ + struct rx_adapter_connection_info *conn; + struct eventdev_params *eventdev_config; + struct rx_adapter_conf *adapter; + int eventdev_id; + int nb_eth_dev; + int adapter_id; + int conn_id; + int i; + + /* Create one adapter with all eth queues mapped to event queues 1:1 */ + + if (em_conf->nb_eventdev == 0) { + EH_LOG_ERR("No event devs registered"); + return -EINVAL; + } + + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + + /* Use the first event dev */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Get eventdev ID */ + eventdev_id = eventdev_config->eventdev_id; + adapter_id = 0; + + /* Get adapter conf */ + adapter = &(em_conf->rx_adapter[adapter_id]); + + /* Set adapter conf */ + adapter->eventdev_id = eventdev_id; + adapter->adapter_id = adapter_id; + adapter->rx_core_id = eh_get_next_eth_core(em_conf); + + /* +* Map all queues of one eth device (port) to one event +* queue. Each port will have an individual connection. 
+* +*/ + + /* Make sure there is enough event queues for 1:1 mapping */ + if (nb_eth_dev > eventdev_config->nb_eventqueue) { + EH_LOG_ERR("Not enough event queues for 1:1 mapping " + "[eth devs: %d, event queues: %d]\n", + nb_eth_dev, eventdev_config->nb_eventqueue); + return -EINVAL; + } + + for (i = 0; i < nb_eth_dev; i++) { + + /* Use only the ports enabled */ + if ((em_conf->eth_portmask & (1 << i)) == 0) + continue; + + /* Get the connection id */ + conn_id = adapter->nb_connections; + + /* Get the connection */ + conn = &(adapter->conn[conn_id]); + + /* Set 1:1 mapping between eth ports & event queues*/ + conn->ethdev_id = i; + conn->eventq_id = i; + + /* Add all eth queues of one eth port to one event queue */ + conn->ethdev_rx_qid = -1; + + /* Update no of connections */ + adapter->nb_connections++; + + } + + /* We have setup one adapter */ + em_co
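For reference, the default config generated above (one connection per enabled port, ethdev_rx_qid = -1 meaning all Rx queues, event queue id equal to the port index) eventually turns into standard Rx adapter calls roughly like the sketch below. The helper's own configure routine is not shown in this excerpt, so treat this as an approximation with error handling trimmed, not the series' actual code.

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch: program one Rx adapter so that every Rx queue of eth port
 * 'ethdev_id' feeds event queue 'eventq_id', i.e. the 1:1 port-to-queue
 * mapping above (rx_queue_id = -1 means "all Rx queues of the port"). */
static int
rx_adapter_connect(uint8_t adapter_id, uint8_t eventdev_id,
                   uint16_t ethdev_id, uint8_t eventq_id,
                   struct rte_event_port_conf *port_conf)
{
        struct rte_event_eth_rx_adapter_queue_conf queue_conf;
        int ret;

        ret = rte_event_eth_rx_adapter_create(adapter_id, eventdev_id,
                                              port_conf);
        if (ret < 0)
                return ret;

        memset(&queue_conf, 0, sizeof(queue_conf));
        queue_conf.ev.queue_id = eventq_id;
        queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

        ret = rte_event_eth_rx_adapter_queue_add(adapter_id, ethdev_id,
                                                 -1, &queue_conf);
        if (ret < 0)
                return ret;

        return rte_event_eth_rx_adapter_start(adapter_id);
}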
[dpdk-dev] [PATCH 00/14] add eventmode to ipsec-secgw
This series introduces event-mode additions to ipsec-secgw. The effort is based on the proposed changes for l2fwd-event and the additions in l3fwd for event support. With this series, ipsec-secgw can run in event mode. The worker thread (the executing loop) receives events and submits them back to the eventdev after processing. This way, multicore scaling and h/w assisted scheduling are achieved by making use of the eventdev capabilities. Since the underlying event devices have varying capabilities, the worker thread can be drafted differently to maximize performance. This series introduces multiple worker threads, and the one to be used is determined by the operating conditions and the underlying device capabilities. For example, if an event device and eth device pair has a Tx internal port, the application can do tx_adapter_enqueue() instead of a regular event_enqueue(). So a thread that assumes the device pair has an internal port will not be the right solution for another pair. The infrastructure added with these patches aims to help the application have multiple worker threads, thereby extracting maximum performance from every device without affecting existing paths/use cases.

The eventmode configuration is predefined. All packets reaching one eth port will hit one event queue. All event queues will be mapped to all event ports. So all cores will be able to receive traffic from all ports. When schedule_type is set as RTE_SCHED_TYPE_ORDERED/ATOMIC, the event device will ensure the ordering. Ordering would be lost when tried in PARALLEL.

The following command line options are introduced:
--transfer-mode: to choose between poll mode & event mode
--schedule-type: to specify the scheduling type (RTE_SCHED_TYPE_ORDERED/ RTE_SCHED_TYPE_ATOMIC/ RTE_SCHED_TYPE_PARALLEL)
--process-dir: outbound/inbound
--process-mode: app mode/driver mode

The two s/w config options added to ipsec-secgw can be used in benchmarking h/w performance:
1. process-dir: states whether the direction is outbound or inbound. This option avoids an unnecessary per-packet check of whether inbound or outbound processing needs to be done. For each option a different lightweight worker thread is executed.
2. process-mode: states whether the application has to run in driver mode or app mode.
Driver mode: This mode has bare minimum changes in the application to support ipsec. There wouldn't be any lookup etc. done in the application. And for the inline-protocol use case, the thread would resemble l2fwd as the ipsec processing would be done entirely in the h/w. This mode can be used to benchmark the raw performance of the h/w. All the application side steps (like lookup) can be redone based on the requirement of the end user. Hence the need for a mode which reports the raw performance.
App mode: This mode has all the features currently implemented with ipsec-secgw (non librte_ipsec mode). All the lookups etc. follow the existing methods and report numbers that can be compared against regular ipsec-secgw benchmark numbers.
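To make the internal-port distinction concrete, the sketch below contrasts the two submission paths a worker can take once a packet is ready for Tx. It is a simplified illustration of the idea under the assumption of a single event and a known Tx event queue id, not code from this series.

#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>

/* Sketch: hand a processed packet over for Tx.  With a Tx internal
 * port the event goes straight to the Tx adapter; without one it is
 * forwarded to a dedicated Tx event queue that a service core drains
 * on behalf of the adapter. */
static inline void
worker_submit_tx(uint8_t eventdev_id, uint8_t event_port_id,
                 struct rte_event *ev, int tx_internal_port,
                 uint8_t tx_ev_queue)
{
        /* Tx adapter transmits to mbuf->port; use its Tx queue 0 */
        rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);

        if (tx_internal_port) {
                rte_event_eth_tx_adapter_enqueue(eventdev_id, event_port_id,
                                                 ev, 1, 0);
                return;
        }

        ev->queue_id = tx_ev_queue;
        ev->op = RTE_EVENT_OP_FORWARD;
        rte_event_enqueue_burst(eventdev_id, event_port_id, ev, 1);
}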
Example commands to execute ipsec-secgw in various modes on the OCTEONTX2 platform:

#Inbound driver mode
./ipsec-secgw -w 0002:02:00.0,nb_ipsec_in_sa=128 -w 0002:03:00.0,nb_ipsec_in_sa=128 -w 0002:04:00.0,nb_ipsec_in_sa=128 -w 0002:07:00.0,nb_ipsec_in_sa=128 -w 0002:0e:00.0 -w 0002:10:00.1 --log-level=8 -c 0x7 -- -P -p 0xf --config "(0,0,0),(1,0,0),(2,0,0),(3,0,0)" -f dpdk_internal/100g_4.3.cfg --transfer-mode 1 --schedule-type 2 --process-mode drv --process-dir in

#Inbound app mode
./ipsec-secgw -w 0002:02:00.0,nb_ipsec_in_sa=128 -w 0002:03:00.0,nb_ipsec_in_sa=128 -w 0002:04:00.0,nb_ipsec_in_sa=128 -w 0002:07:00.0,nb_ipsec_in_sa=128 -w 0002:0e:00.0 -w 0002:10:00.1 --log-level=8 -c 0x3f -- -P -p 0xf --config "(0,0,0),(1,0,0),(2,0,0),(3,0,0)" -f dpdk_internal/100g_4.3.cfg --transfer-mode 1 --schedule-type 2 --process-mode app --process-dir in

#Outbound driver mode
./ipsec-secgw -w 0002:02:00.0 -w 0002:03:00.0 -w 0002:04:00.0 -w 0002:07:00.0 -w 0002:0e:00.0 -w 0002:10:00.1 --log-level=8 -c 0x1f -- -P -p 0xf --config "(0,0,0),(1,0,0),(2,0,0),(3,0,0)" -f a-aes-gcm-new.cfg --transfer-mode 1 --schedule-type 2 --process-mode drv --process-dir out

#Outbound app mode
./ipsec-secgw -w 0002:02:00.0 -w 0002:03:00.0 -w 0002:04:00.0 -w 0002:07:00.0 -w 0002:0e:00.0 -w 0002:10:00.1 --log-level=8 -c 0x7f -- -P -p 0xf --config "(0,0,0),(1,0,0),(2,0,0),(3,0,0)" -f a-aes-gcm-new.cfg --transfer-mode 1 --schedule-type 2 --process-mode app --process-dir out

This series doesn't introduce any library change. And the decision to add eventmode additions in ipsec-secgw was approved by the Tech Board. This series
[dpdk-dev] [PATCH 01/14] examples/ipsec-secgw: add default rte_flow for inline Rx
From: Ankur Dwivedi The default flow created would enable security processing on all ESP packets. If the default flow is created, SA based rte_flow creation would be skipped. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph --- examples/ipsec-secgw/ipsec-secgw.c | 56 ++ examples/ipsec-secgw/ipsec.c | 8 ++ examples/ipsec-secgw/ipsec.h | 6 3 files changed, 70 insertions(+) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 3b5aaf6..7506922 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -128,6 +128,8 @@ struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } }; +struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; + #define CMD_LINE_OPT_CONFIG"config" #define CMD_LINE_OPT_SINGLE_SA "single-sa" #define CMD_LINE_OPT_CRYPTODEV_MASK"cryptodev_mask" @@ -2406,6 +2408,55 @@ reassemble_init(void) return rc; } +static int +create_default_ipsec_flow(uint16_t port_id, uint64_t rx_offloads) +{ + int ret = 0; + + /* Add the default ipsec flow to detect all ESP packets for rx */ + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { + struct rte_flow_action action[2]; + struct rte_flow_item pattern[2]; + struct rte_flow_attr attr = {0}; + struct rte_flow_error err; + struct rte_flow *flow; + + pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP; + pattern[0].spec = NULL; + pattern[0].mask = NULL; + pattern[0].last = NULL; + pattern[1].type = RTE_FLOW_ITEM_TYPE_END; + + action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; + action[0].conf = NULL; + action[1].type = RTE_FLOW_ACTION_TYPE_END; + action[1].conf = NULL; + + attr.egress = 0; + attr.ingress = 1; + + ret = rte_flow_validate(port_id, &attr, pattern, action, &err); + if (ret) { + RTE_LOG(ERR, IPSEC, + "Failed to validate ipsec flow %s\n", + err.message); + goto exit; + } + + flow = rte_flow_create(port_id, &attr, pattern, action, &err); + if (flow == NULL) { + RTE_LOG(ERR, IPSEC, + "Failed to create ipsec flow %s\n", + err.message); + ret = -rte_errno; + goto exit; + } + flow_info_tbl[port_id].rx_def_flow = flow; + } +exit: + return ret; +} + int32_t main(int32_t argc, char **argv) { @@ -2478,6 +2529,11 @@ main(int32_t argc, char **argv) sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); port_init(portid, req_rx_offloads, req_tx_offloads); + /* Create default ipsec flow for the ethernet device */ + ret = create_default_ipsec_flow(portid, req_rx_offloads); + if (ret) + printf("Cannot create default flow, err=%d, port=%d\n", + ret, portid); } cryptodevs_init(); diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index d4b5712..e529f68 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -261,6 +261,12 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, unsigned int i; unsigned int j; + /* +* Don't create flow if default flow is already created +*/ + if (flow_info_tbl[sa->portid].rx_def_flow) + goto set_cdev_id; + ret = rte_eth_dev_info_get(sa->portid, &dev_info); if (ret != 0) { RTE_LOG(ERR, IPSEC, @@ -396,6 +402,8 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, ips->security.ol_flags = sec_cap->ol_flags; ips->security.ctx = sec_ctx; } + +set_cdev_id: sa->cdev_id_qp = 0; return 0; diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 8e07521..28ff07d 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -81,6 +81,12 @@ struct app_sa_prm { extern struct app_sa_prm app_sa_prm; +struct 
flow_info { + struct rte_flow *rx_def_flow; +}; + +extern struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; + enum { IPSEC_SESSION_PRIMARY = 0, IPSEC_SESSION_FALLBACK = 1, -- 2.7.4
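The hunk above only adds creation of the default flow. For completeness, a teardown counterpart would look roughly like the sketch below; how (or whether) the series cleans the flow up on exit is not shown in this excerpt, so the function name and its use are illustrative only.

#include <rte_flow.h>

/* Sketch: teardown counterpart of create_default_ipsec_flow().  Pass
 * the flow saved in flow_info_tbl[port_id].rx_def_flow. */
static int
destroy_default_ipsec_flow(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_error err;

        if (flow == NULL)
                return 0;

        return rte_flow_destroy(port_id, flow, &err);
}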
[dpdk-dev] [PATCH 02/14] examples/ipsec-secgw: add framework for eventmode helper
Add framework for eventmode helper. Event mode would involve initialization of multiple devices, like eventdev, ethdev etc. Add routines to initialize and uninitialize event devices. Generate a default config for event devices if it is not specified in the configuration. The init routine will iterate over available event devices and their properties and will set the config accordingly. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/Makefile | 1 + examples/ipsec-secgw/event_helper.c | 311 examples/ipsec-secgw/event_helper.h | 115 + examples/ipsec-secgw/meson.build| 4 +- 4 files changed, 429 insertions(+), 2 deletions(-) create mode 100644 examples/ipsec-secgw/event_helper.c create mode 100644 examples/ipsec-secgw/event_helper.h diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile index a4977f6..09e3c5a 100644 --- a/examples/ipsec-secgw/Makefile +++ b/examples/ipsec-secgw/Makefile @@ -15,6 +15,7 @@ SRCS-y += sa.c SRCS-y += rt.c SRCS-y += ipsec_process.c SRCS-y += ipsec-secgw.c +SRCS-y += event_helper.c CFLAGS += -gdwarf-2 diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c new file mode 100644 index 000..b11e861 --- /dev/null +++ b/examples/ipsec-secgw/event_helper.c @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2019 Marvell International Ltd. + */ +#include +#include + +#include "event_helper.h" + +static int +eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) +{ + struct eventdev_params *eventdev_config; + struct rte_event_dev_info dev_info; + int nb_eventdev; + int i, ret; + + /* Get the number of event devices */ + nb_eventdev = rte_event_dev_count(); + + if (nb_eventdev == 0) { + EH_LOG_ERR("No event devices detected"); + return -EINVAL; + } + + for (i = 0; i < nb_eventdev; i++) { + + /* Get the event dev conf */ + eventdev_config = &(em_conf->eventdev_config[i]); + + /* Read event device info */ + ret = rte_event_dev_info_get(i, &dev_info); + + if (ret < 0) { + EH_LOG_ERR("Failed to read event device info %d", ret); + return ret; + } + + /* Check if enough ports are available */ + if (dev_info.max_event_ports < 2) { + EH_LOG_ERR("Not enough event ports available"); + return -EINVAL; + } + + /* Save number of queues & ports available */ + eventdev_config->eventdev_id = i; + eventdev_config->nb_eventqueue = dev_info.max_event_queues; + eventdev_config->nb_eventport = dev_info.max_event_ports; + eventdev_config->ev_queue_mode = + RTE_EVENT_QUEUE_CFG_ALL_TYPES; + + /* One port is required for eth Rx adapter */ + eventdev_config->nb_eventport -= 1; + + /* One port is reserved for eth Tx adapter */ + eventdev_config->nb_eventport -= 1; + + /* Update the number of event devices */ + em_conf->nb_eventdev++; + } + + return 0; +} + +static int +eh_validate_conf(struct eventmode_conf *em_conf) +{ + int ret; + + /* +* Check if event devs are specified. 
Else probe the event devices +* and initialize the config with all ports & queues available +*/ + if (em_conf->nb_eventdev == 0) { + ret = eh_set_default_conf_eventdev(em_conf); + if (ret != 0) + return ret; + } + + return 0; +} + +static int +eh_initialize_eventdev(struct eventmode_conf *em_conf) +{ + struct rte_event_queue_conf eventq_conf = {0}; + struct rte_event_dev_info evdev_default_conf; + struct rte_event_dev_config eventdev_conf; + struct eventdev_params *eventdev_config; + int nb_eventdev = em_conf->nb_eventdev; + uint8_t eventdev_id; + int nb_eventqueue; + uint8_t i, j; + int ret; + + for (i = 0; i < nb_eventdev; i++) { + + /* Get eventdev config */ + eventdev_config = &(em_conf->eventdev_config[i]); + + /* Get event dev ID */ + eventdev_id = eventdev_config->eventdev_id; + + /* Get the number of queues */ + nb_eventqueue = eventdev_config->nb_eventqueue; + + /* One queue is reserved for the final stage (doing eth tx) */ + nb_eventqueue += 1; + + /* Reset the default conf */ + memset(&evdev_default_conf, 0, + sizeof(struct rte_event_dev_info)); + + /* Get default conf of eventdev */ + ret = rte_event_dev
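For orientation, the routine above follows the canonical eventdev bring-up order: configure the device, set up its queues and ports, then start it. A condensed sketch of that sequence is below; the helper additionally trims queue/port counts and sets queue modes as shown in the diff (which is cut off in this excerpt), so this is a simplified approximation rather than the helper's code.

#include <string.h>
#include <rte_eventdev.h>

/* Sketch: minimal eventdev bring-up with default queue/port configs. */
static int
eventdev_bringup(uint8_t dev_id, uint8_t nb_queues, uint8_t nb_ports)
{
        struct rte_event_dev_info info;
        struct rte_event_dev_config cfg;
        uint8_t i;

        rte_event_dev_info_get(dev_id, &info);

        memset(&cfg, 0, sizeof(cfg));
        cfg.nb_event_queues = nb_queues;
        cfg.nb_event_ports = nb_ports;
        cfg.nb_events_limit = info.max_num_events;
        cfg.nb_event_queue_flows = info.max_event_queue_flows;
        cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
        cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
        cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

        if (rte_event_dev_configure(dev_id, &cfg) < 0)
                return -1;

        for (i = 0; i < nb_queues; i++)
                if (rte_event_queue_setup(dev_id, i, NULL) < 0)
                        return -1;

        for (i = 0; i < nb_ports; i++)
                if (rte_event_port_setup(dev_id, i, NULL) < 0)
                        return -1;

        return rte_event_dev_start(dev_id);
}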
[dpdk-dev] [PATCH 05/14] examples/ipsec-secgw: add Tx adapter support
Add Tx adapter support. The event helper init routine will initialize the Tx adapter according to the configuration. If Tx adapter config is not present it will generate a default config. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 326 examples/ipsec-secgw/event_helper.h | 48 ++ 2 files changed, 374 insertions(+) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index f0eca01..9c07cc7 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "event_helper.h" @@ -80,6 +81,22 @@ eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core) return next_core; } +static struct eventdev_params * +eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id) +{ + int i; + + for (i = 0; i < em_conf->nb_eventdev; i++) { + if (em_conf->eventdev_config[i].eventdev_id == eventdev_id) + break; + } + + /* No match */ + if (i == em_conf->nb_eventdev) + return NULL; + + return &(em_conf->eventdev_config[i]); +} static int eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) { @@ -285,6 +302,99 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf) } static int +eh_set_default_conf_tx_adapter(struct eventmode_conf *em_conf) +{ + struct tx_adapter_connection_info *conn; + struct eventdev_params *eventdev_config; + struct tx_adapter_conf *tx_adapter; + int eventdev_id; + int adapter_id; + int nb_eth_dev; + int conn_id; + int i; + + /* +* Create one Tx adapter with all eth queues mapped to event queues +* 1:1. +*/ + + if (em_conf->nb_eventdev == 0) { + EH_LOG_ERR("No event devs registered"); + return -EINVAL; + } + + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + + /* Use the first event dev */ + eventdev_config = &(em_conf->eventdev_config[0]); + + /* Get eventdev ID */ + eventdev_id = eventdev_config->eventdev_id; + adapter_id = 0; + + /* Get adapter conf */ + tx_adapter = &(em_conf->tx_adapter[adapter_id]); + + /* Set adapter conf */ + tx_adapter->eventdev_id = eventdev_id; + tx_adapter->adapter_id = adapter_id; + + /* TODO: Tx core is required only when internal port is not present */ + + tx_adapter->tx_core_id = eh_get_next_eth_core(em_conf); + + /* +* Application uses one event queue per adapter for submitting +* packets for Tx. Reserve the last queue available and decrement +* the total available event queues for this +*/ + + /* Queue numbers start at 0 */ + tx_adapter->tx_ev_queue = eventdev_config->nb_eventqueue - 1; + + /* Update the number of event queues available in eventdev */ + eventdev_config->nb_eventqueue--; + + /* +* Map all Tx queues of the eth device (port) to the event device. +*/ + + /* Set defaults for connections */ + + /* +* One eth device (port) is one connection. Map all Tx queues +* of the device to the Tx adapter. 
+*/ + + for (i = 0; i < nb_eth_dev; i++) { + + /* Use only the ports enabled */ + if ((em_conf->eth_portmask & (1 << i)) == 0) + continue; + + /* Get the connection id */ + conn_id = tx_adapter->nb_connections; + + /* Get the connection */ + conn = &(tx_adapter->conn[conn_id]); + + /* Add ethdev to connections */ + conn->ethdev_id = i; + + /* Add all eth tx queues to adapter */ + conn->ethdev_tx_qid = -1; + + /* Update no of connections */ + tx_adapter->nb_connections++; + } + + /* We have setup one adapter */ + em_conf->nb_tx_adapter = 1; + return 0; +} + +static int eh_validate_conf(struct eventmode_conf *em_conf) { int ret; @@ -319,6 +429,16 @@ eh_validate_conf(struct eventmode_conf *em_conf) return ret; } + /* +* Check if tx adapters are specified. Else generate a default config +* with one tx adapter. +*/ + if (em_conf->nb_tx_adapter == 0) { + ret = eh_set_default_conf_tx_adapter(em_conf); + if (ret != 0) + return ret; + } + return 0; } @@ -584,6 +704,142 @@ eh_initialize_rx_adapter(struct eventmode_conf *em_conf) return 0; } +static int +eh_tx_adapter_configure(struct eventmode_conf *em_conf, + struct tx_adapter_conf *adapter) +{ + struct rte_e
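The default config above reserves the last event queue for Tx submissions and records ethdev_tx_qid = -1 per connection. In terms of the Tx adapter API this roughly corresponds to the calls sketched below; the helper's own configure routine is cut off in this excerpt, so treat this as an approximation with error reporting trimmed.

#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>

/* Sketch: create one Tx adapter and attach all Tx queues of eth port
 * 'ethdev_id' to it (queue = -1), matching ethdev_tx_qid = -1 above. */
static int
tx_adapter_connect(uint8_t adapter_id, uint8_t eventdev_id,
                   uint16_t ethdev_id, struct rte_event_port_conf *port_conf)
{
        int ret;

        ret = rte_event_eth_tx_adapter_create(adapter_id, eventdev_id,
                                              port_conf);
        if (ret < 0)
                return ret;

        ret = rte_event_eth_tx_adapter_queue_add(adapter_id, ethdev_id, -1);
        if (ret < 0)
                return ret;

        return rte_event_eth_tx_adapter_start(adapter_id);
}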
[dpdk-dev] [PATCH 07/14] examples/ipsec-secgw: add routines to launch workers
From: Lukasz Bartosik With eventmode, workers can be drafted differently according to the capabilities of the underlying event device. The added functions will receive an array of such workers and probe the eventmode properties to choose the worker. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 340 examples/ipsec-secgw/event_helper.h | 48 + 2 files changed, 388 insertions(+) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index f120e43..a67132a 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -7,9 +7,12 @@ #include #include #include +#include #include "event_helper.h" +static volatile bool eth_core_running; + static int eh_get_enabled_cores(struct rte_bitmap *eth_core_mask) { @@ -97,6 +100,16 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id) return &(em_conf->eventdev_config[i]); } +static inline bool +eh_dev_has_burst_mode(uint8_t dev_id) +{ + struct rte_event_dev_info dev_info; + + rte_event_dev_info_get(dev_id, &dev_info); + return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ? + true : false; +} + static int eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) { @@ -704,6 +717,260 @@ eh_initialize_rx_adapter(struct eventmode_conf *em_conf) return 0; } +static int32_t +eh_start_worker_eth_core(struct eventmode_conf *conf, uint32_t lcore_id) +{ + uint32_t service_id[EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE]; + struct rx_adapter_conf *rx_adapter; + struct tx_adapter_conf *tx_adapter; + int service_count = 0; + int adapter_id; + int32_t ret; + int i; + + EH_LOG_INFO("Entering eth_core processing on lcore %u", lcore_id); + + /* +* Parse adapter config to check which of all Rx adapters need +* to be handled by this core. +*/ + for (i = 0; i < conf->nb_rx_adapter; i++) { + /* Check if we have exceeded the max allowed */ + if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_RX_CORE) { + EH_LOG_ERR( + "Exceeded the max allowed adapters per rx core"); + break; + } + + rx_adapter = &(conf->rx_adapter[i]); + if (rx_adapter->rx_core_id != lcore_id) + continue; + + /* Adapter is handled by this core */ + adapter_id = rx_adapter->adapter_id; + + /* Get the service ID for the adapters */ + ret = rte_event_eth_rx_adapter_service_id_get(adapter_id, + &(service_id[service_count])); + + if (ret != -ESRCH && ret < 0) { + EH_LOG_ERR( + "Failed to get service id used by rx adapter"); + return ret; + } + + /* Update service count */ + service_count++; + } + + /* +* Parse adapter config to see which all Tx adapters need +* to be handled this core. 
+*/ + for (i = 0; i < conf->nb_tx_adapter; i++) { + /* Check if we have exceeded the max allowed */ + if (service_count > EVENT_MODE_MAX_ADAPTERS_PER_TX_CORE) { + EH_LOG_ERR( + "Exceeded the max allowed adapters per tx core"); + break; + } + + tx_adapter = &conf->tx_adapter[i]; + if (tx_adapter->tx_core_id != lcore_id) + continue; + + /* Adapter is handled by this core */ + adapter_id = tx_adapter->adapter_id; + + /* Get the service ID for the adapters */ + ret = rte_event_eth_tx_adapter_service_id_get(adapter_id, + &(service_id[service_count])); + + if (ret != -ESRCH && ret < 0) { + EH_LOG_ERR( + "Failed to get service id used by tx adapter"); + return ret; + } + + /* Update service count */ + service_count++; + } + + eth_core_running = true; + + while (eth_core_running) { + for (i = 0; i < service_count; i++) { + /* Initiate adapter service */ + rte_service_run_iter_on_app_lcore(service_id[i], 0); + } + } + + return 0; +} + +static int32_t +eh_stop_worker_eth_core(void) +{ + if (eth_core_running) { + EH_LOG_INFO("Stopping eth cores"); + eth_core_running = false; + } + return 0; +} + +static struct eh_app_worker_params * +eh_find_worker(uint32_t lcore_id, struc
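Worker selection is driven by a small capability word: the helper builds the "current conditions" capabilities from the device properties and then picks the registered worker whose capabilities match. A self-contained toy version of that matching pattern is below; the field layout and the exact-match rule are simplified relative to the event_helper.h definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

union worker_caps {
        struct {
                uint64_t burst : 1;
                uint64_t tx_internal_port : 1;
        };
        uint64_t u64;
};

struct worker {
        union worker_caps cap;
        void (*worker_thread)(void);
};

static void drain_single(void) { puts("single-event worker"); }
static void drain_burst(void)  { puts("burst worker"); }

static const struct worker registered[] = {
        { .cap = { .burst = 0, .tx_internal_port = 1 },
          .worker_thread = drain_single },
        { .cap = { .burst = 1, .tx_internal_port = 1 },
          .worker_thread = drain_burst },
};

int
main(void)
{
        union worker_caps curr;
        size_t i;

        curr.u64 = 0;
        curr.burst = 1;              /* device supports burst dequeue */
        curr.tx_internal_port = 1;   /* all eth devs have Tx internal port */

        /* Pick the registered worker matching the current conditions */
        for (i = 0; i < sizeof(registered) / sizeof(registered[0]); i++) {
                if (registered[i].cap.u64 == curr.u64) {
                        registered[i].worker_thread();
                        break;
                }
        }
        return 0;
}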
[dpdk-dev] [PATCH 03/14] examples/ipsec-secgw: add eventdev port-lcore link
Add event device port-lcore link and specify which event queues should be connected to the event port. Generate a default config for event port-lcore links if it is not specified in the configuration. This routine will check the number of available ports and then create links according to the number of cores available. This patch also adds a new entry in the eventmode conf to denote that all queues are to be linked with every port. This enables one core to receive packets fromall ethernet ports. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 131 examples/ipsec-secgw/event_helper.h | 33 + 2 files changed, 164 insertions(+) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index b11e861..d0157f4 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -1,11 +1,35 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2019 Marvell International Ltd. */ +#include #include #include +#include #include "event_helper.h" +static inline unsigned int +eh_get_next_active_core(struct eventmode_conf *em_conf, unsigned int prev_core) +{ + unsigned int next_core; + +get_next_core: + /* Get the next core */ + next_core = rte_get_next_lcore(prev_core, 0, 0); + + /* Check if we have reached max lcores */ + if (next_core == RTE_MAX_LCORE) + return next_core; + + /* Skip cores reserved as eth cores */ + if (rte_bitmap_get(em_conf->eth_core_mask, next_core)) { + prev_core = next_core; + goto get_next_core; + } + + return next_core; +} + static int eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) { @@ -62,6 +86,74 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) } static int +eh_set_default_conf_link(struct eventmode_conf *em_conf) +{ + struct eventdev_params *eventdev_config; + struct eh_event_link_info *link; + unsigned int lcore_id = -1; + int link_index; + int i, j; + + /* +* Create a 1:1 mapping from event ports to cores. If the number +* of event ports is lesser than the cores, some cores won't +* execute worker. If there are more event ports, then some ports +* won't be used. +* +*/ + + /* +* The event queue-port mapping is done according to the link. Since +* we are falling back to the default link config, enabling +* "all_ev_queue_to_ev_port" mode flag. This will map all queues +* to the port. +*/ + em_conf->ext_params.all_ev_queue_to_ev_port = 1; + + for (i = 0; i < em_conf->nb_eventdev; i++) { + + /* Get event dev conf */ + eventdev_config = &(em_conf->eventdev_config[i]); + + /* Loop through the ports */ + for (j = 0; j < eventdev_config->nb_eventport; j++) { + + /* Get next active core id */ + lcore_id = eh_get_next_active_core(em_conf, + lcore_id); + + if (lcore_id == RTE_MAX_LCORE) { + /* Reached max cores */ + return 0; + } + + /* Save the current combination as one link */ + + /* Get the index */ + link_index = em_conf->nb_link; + + /* Get the corresponding link */ + link = &(em_conf->link[link_index]); + + /* Save link */ + link->eventdev_id = eventdev_config->eventdev_id; + link->event_port_id = j; + link->lcore_id = lcore_id; + + /* +* Don't set eventq_id as by default all queues +* need to be mapped to the port, which is controlled +* by the operating mode. +*/ + + /* Update number of links */ + em_conf->nb_link++; + } + } + return 0; +} + +static int eh_validate_conf(struct eventmode_conf *em_conf) { int ret; @@ -76,6 +168,16 @@ eh_validate_conf(struct eventmode_conf *em_conf) return ret; } + /* +* Check if links are specified. 
Else generate a default config for +* the event ports used. +*/ + if (em_conf->nb_link == 0) { + ret = eh_set_default_conf_link(em_conf); + if (ret != 0) + return ret; + } + return 0; } @@ -87,6 +189,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf) struct rte_event_dev_config eventdev_conf; struct eventdev_params *eventdev_config;
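Each link entry eventually becomes an rte_event_port_link() call. When the all_ev_queue_to_ev_port flag set above is in effect, the queue list can simply be left NULL so that every configured event queue is linked to the port. A small sketch of that translation follows; it is illustrative, not the helper's code (which applies the links in a later step).

#include <rte_eventdev.h>

/* Sketch: apply one link entry from the default config above. */
static int
apply_link(uint8_t eventdev_id, uint8_t event_port_id,
           uint8_t eventq_id, int all_queues)
{
        uint8_t queue = eventq_id;
        int nb_links;

        if (all_queues)
                /* NULL queue list links all configured event queues */
                nb_links = rte_event_port_link(eventdev_id, event_port_id,
                                               NULL, NULL, 0);
        else
                nb_links = rte_event_port_link(eventdev_id, event_port_id,
                                               &queue, NULL, 1);

        return nb_links < 0 ? nb_links : 0;
}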
[dpdk-dev] [PATCH 06/14] examples/ipsec-secgw: add routines to display config
Add routines to display the eventmode configuration. This gives an overview of the devices used. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 207 examples/ipsec-secgw/event_helper.h | 14 +++ 2 files changed, 221 insertions(+) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index 9c07cc7..f120e43 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -840,6 +840,210 @@ eh_initialize_tx_adapter(struct eventmode_conf *em_conf) return 0; } +static void +eh_display_operating_mode(struct eventmode_conf *em_conf) +{ + char sched_types[][32] = { + "RTE_SCHED_TYPE_ORDERED", + "RTE_SCHED_TYPE_ATOMIC", + "RTE_SCHED_TYPE_PARALLEL", + }; + EH_LOG_INFO("Operating mode:"); + + EH_LOG_INFO("\tScheduling type: \t%s", + sched_types[em_conf->ext_params.sched_type]); + + EH_LOG_INFO(""); +} + +static void +eh_display_event_dev_conf(struct eventmode_conf *em_conf) +{ + char queue_mode[][32] = { + "", + "ATQ (ALL TYPE QUEUE)", + "SINGLE LINK", + }; + char print_buf[256] = { 0 }; + int i; + + EH_LOG_INFO("Event Device Configuration:"); + + for (i = 0; i < em_conf->nb_eventdev; i++) { + sprintf(print_buf, + "\tDev ID: %-2d \tQueues: %-2d \tPorts: %-2d", + em_conf->eventdev_config[i].eventdev_id, + em_conf->eventdev_config[i].nb_eventqueue, + em_conf->eventdev_config[i].nb_eventport); + sprintf(print_buf + strlen(print_buf), + "\tQueue mode: %s", + queue_mode[em_conf->eventdev_config[i].ev_queue_mode]); + EH_LOG_INFO("%s", print_buf); + } + EH_LOG_INFO(""); +} + +static void +eh_display_rx_adapter_conf(struct eventmode_conf *em_conf) +{ + int nb_rx_adapter = em_conf->nb_rx_adapter; + struct rx_adapter_connection_info *conn; + struct rx_adapter_conf *adapter; + char print_buf[256] = { 0 }; + int i, j; + + EH_LOG_INFO("Rx adapters configured: %d", nb_rx_adapter); + + for (i = 0; i < nb_rx_adapter; i++) { + adapter = &(em_conf->rx_adapter[i]); + EH_LOG_INFO( + "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d" + "\tRx core: %-2d", + adapter->adapter_id, + adapter->nb_connections, + adapter->eventdev_id, + adapter->rx_core_id); + + for (j = 0; j < adapter->nb_connections; j++) { + conn = &(adapter->conn[j]); + + sprintf(print_buf, + "\t\tEthdev ID: %-2d", conn->ethdev_id); + + if (conn->ethdev_rx_qid == -1) + sprintf(print_buf + strlen(print_buf), + "\tEth rx queue: %-2s", "ALL"); + else + sprintf(print_buf + strlen(print_buf), + "\tEth rx queue: %-2d", + conn->ethdev_rx_qid); + + sprintf(print_buf + strlen(print_buf), + "\tEvent queue: %-2d", conn->eventq_id); + EH_LOG_INFO("%s", print_buf); + } + } + EH_LOG_INFO(""); +} + +static void +eh_display_tx_adapter_conf(struct eventmode_conf *em_conf) +{ + int nb_tx_adapter = em_conf->nb_tx_adapter; + struct tx_adapter_connection_info *conn; + struct tx_adapter_conf *adapter; + char print_buf[256] = { 0 }; + int i, j; + + EH_LOG_INFO("Tx adapters configured: %d", nb_tx_adapter); + + for (i = 0; i < nb_tx_adapter; i++) { + adapter = &(em_conf->tx_adapter[i]); + sprintf(print_buf, + "\tTx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d", + adapter->adapter_id, + adapter->nb_connections, + adapter->eventdev_id); + if (adapter->tx_core_id == (uint32_t)-1) + sprintf(print_buf + strlen(print_buf), + "\tTx core: %-2s", "[INTERNAL PORT]"); + else if (adapter->tx_core_id == RTE_MAX_LCORE) + sprintf(print_buf + strlen(print_buf), + "\tTx core: %-2s", "[NONE]"); + else + sprintf(print_buf + strlen(print_buf), + "\tTx core: 
%-2d,\tInput event queue: %-2d", + adapter->tx_core_i
[dpdk-dev] [PATCH 10/14] examples/ipsec-secgw: add app inbound worker
From: Lukasz Bartosik Add application inbound worker thread. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec_worker.c | 85 - 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 87c657b..fce274a 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -52,7 +52,7 @@ ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id) */ /* Workers registered */ -#define IPSEC_EVENTMODE_WORKERS1 +#define IPSEC_EVENTMODE_WORKERS2 /* * Event mode worker @@ -126,6 +126,79 @@ ipsec_wrkr_non_burst_int_port_drvr_mode_inb(struct eh_event_link_info *links, return; } +/* + * Event mode worker + * Operating parameters : non-burst - Tx internal port - app mode - inbound + */ +static void +ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, + uint8_t nb_links) +{ + unsigned int nb_rx = 0; + unsigned int port_id; + struct rte_mbuf *pkt; + struct rte_event ev; + uint32_t lcore_id; + + /* Check if we have links registered for this lcore */ + if (nb_links == 0) { + /* No links registered - exit */ + goto exit; + } + + /* We have valid links */ + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + RTE_LOG(INFO, IPSEC, + "Launching event mode worker (non-burst - Tx internal port - " + "app mode - inbound) on lcore %d\n", lcore_id); + + /* Check if it's single link */ + if (nb_links != 1) { + RTE_LOG(INFO, IPSEC, + "Multiple links not supported. Using first link\n"); + } + + RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id, + links[0].event_port_id); + + while (!force_quit) { + /* Read packet from event queues */ + nb_rx = rte_event_dequeue_burst(links[0].eventdev_id, + links[0].event_port_id, + &ev, /* events */ + 1, /* nb_events */ + 0/* timeout_ticks */); + + if (nb_rx == 0) + continue; + + port_id = ev.queue_id; + pkt = ev.mbuf; + + rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); + + /* Process packet */ + ipsec_event_pre_forward(pkt, port_id); + + /* +* Since tx internal port is available, events can be +* directly enqueued to the adapter and it would be +* internally submitted to the eth device. +*/ + rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id, + links[0].event_port_id, + &ev,/* events */ + 1, /* nb_events */ + 0 /* flags */); + } + +exit: + return; +} + static uint8_t ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs) { @@ -142,6 +215,16 @@ ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs) wrkr->cap.ipsec_dir = EH_IPSEC_DIR_TYPE_INBOUND; wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drvr_mode_inb; + wrkr++; + nb_wrkr_param++; + + /* Non-burst - Tx internal port - app mode - inbound */ + wrkr->cap.burst = EH_RX_TYPE_NON_BURST; + wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT; + wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP; + wrkr->cap.ipsec_dir = EH_IPSEC_DIR_TYPE_INBOUND; + wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode_inb; + nb_wrkr_param++; return nb_wrkr_param; } -- 2.7.4
[dpdk-dev] [PATCH 09/14] examples/ipsec-secgw: add eventmode to ipsec-secgw
From: Lukasz Bartosik Add eventmode support to ipsec-secgw. This uses event helper to setup and use the eventmode capabilities. Add driver inbound worker. Example command: ./ipsec-secgw -c 0x1 -w 0002:02:00.0,ipsec_in_max_spi=100 -w 0002:07:00.0 -w 0002:0e:00.0 -w 0002:10:00.1 -- -P -p 0x3 -u 0x1 --config "(0,0,0),(1,0,0)" -f a-aes-gcm-msa.cfg --transfer-mode 1 --schedule-type 2 --process-mode drv --process-dir in Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/Makefile | 1 + examples/ipsec-secgw/event_helper.c | 3 + examples/ipsec-secgw/event_helper.h | 26 +++ examples/ipsec-secgw/ipsec-secgw.c | 344 +++- examples/ipsec-secgw/ipsec.h| 7 + examples/ipsec-secgw/ipsec_worker.c | 180 +++ examples/ipsec-secgw/meson.build| 2 +- 7 files changed, 555 insertions(+), 8 deletions(-) create mode 100644 examples/ipsec-secgw/ipsec_worker.c diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile index 09e3c5a..f6fd94c 100644 --- a/examples/ipsec-secgw/Makefile +++ b/examples/ipsec-secgw/Makefile @@ -15,6 +15,7 @@ SRCS-y += sa.c SRCS-y += rt.c SRCS-y += ipsec_process.c SRCS-y += ipsec-secgw.c +SRCS-y += ipsec_worker.c SRCS-y += event_helper.c CFLAGS += -gdwarf-2 diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index 6549875..44f997d 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -984,6 +984,9 @@ eh_find_worker(uint32_t lcore_id, struct eh_conf *conf, else curr_conf.cap.burst = EH_RX_TYPE_NON_BURST; + curr_conf.cap.ipsec_mode = conf->ipsec_mode; + curr_conf.cap.ipsec_dir = conf->ipsec_dir; + /* Parse the passed list and see if we have matching capabilities */ /* Initialize the pointer used to traverse the list */ diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h index 2895dfa..07849b0 100644 --- a/examples/ipsec-secgw/event_helper.h +++ b/examples/ipsec-secgw/event_helper.h @@ -74,6 +74,22 @@ enum eh_tx_types { EH_TX_TYPE_NO_INTERNAL_PORT }; +/** + * Event mode ipsec mode types + */ +enum eh_ipsec_mode_types { + EH_IPSEC_MODE_TYPE_APP = 0, + EH_IPSEC_MODE_TYPE_DRIVER +}; + +/** + * Event mode ipsec direction types + */ +enum eh_ipsec_dir_types { + EH_IPSEC_DIR_TYPE_OUTBOUND = 0, + EH_IPSEC_DIR_TYPE_INBOUND, +}; + /* Event dev params */ struct eventdev_params { uint8_t eventdev_id; @@ -183,6 +199,12 @@ struct eh_conf { */ void *mode_params; /**< Mode specific parameters */ + + /** Application specific params */ + enum eh_ipsec_mode_types ipsec_mode; + /**< Mode of ipsec run */ + enum eh_ipsec_dir_types ipsec_dir; + /**< Direction of ipsec processing */ }; /* Workers registered by the application */ @@ -194,6 +216,10 @@ struct eh_app_worker_params { /**< Specify status of rx type burst */ uint64_t tx_internal_port : 1; /**< Specify whether tx internal port is available */ + uint64_t ipsec_mode : 1; + /**< Specify ipsec processing level */ + uint64_t ipsec_dir : 1; + /**< Specify direction of ipsec */ }; uint64_t u64; } cap; diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 7506922..c5d95b9 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -2,6 +2,7 @@ * Copyright(c) 2016 Intel Corporation */ +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include #include @@ -41,12 +43,17 @@ #include #include #include +#include +#include #include #include +#include "event_helper.h" #include "ipsec.h" #include "parser.h" 
+volatile bool force_quit; + #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 #define MAX_JUMBO_PKT_LEN 9600 @@ -133,12 +140,21 @@ struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; #define CMD_LINE_OPT_CONFIG"config" #define CMD_LINE_OPT_SINGLE_SA "single-sa" #define CMD_LINE_OPT_CRYPTODEV_MASK"cryptodev_mask" +#define CMD_LINE_OPT_TRANSFER_MODE "transfer-mode" +#define CMD_LINE_OPT_SCHEDULE_TYPE "schedule-type" +#define CMD_LINE_OPT_IPSEC_MODE"process-mode" +#define CMD_LINE_OPT_IPSEC_DIR "process-dir" #define CMD_LINE_OPT_RX_OFFLOAD"rxoffload" #define CMD_LINE_OPT_TX_OFFLOAD"txoffload" #define CMD_LINE_OPT_REASSEMBLE"reassemble" #define CMD_LINE_OPT_MTU "mtu" #define CMD_LINE_OPT_FRAG_TTL "frag-ttl" +#define CMD_LINE_ARG_APP "app" +#define CMD_LINE_AR
[dpdk-dev] [PATCH 12/14] examples/ipsec-secgw: add driver outbound worker
From: Ankur Dwivedi This patch adds the driver outbound worker thread for ipsec-secgw. In this mode the security session is a fixed one and sa update is not done. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec-secgw.c | 12 + examples/ipsec-secgw/ipsec.c| 9 examples/ipsec-secgw/ipsec_worker.c | 90 - 3 files changed, 110 insertions(+), 1 deletion(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 2e7d4d8..76719f2 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -2011,6 +2011,18 @@ cryptodevs_init(void) i++; } + /* +* Set the queue pair to at least the number of ethernet +* devices for inline outbound. +*/ + qp = RTE_MAX(rte_eth_dev_count_avail(), qp); + + /* +* The requested number of queues should never exceed +* the max available +*/ + qp = RTE_MIN(qp, max_nb_qps); + if (qp == 0) continue; diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index e529f68..9ff8a63 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -141,6 +141,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa, return 0; } +uint16_t sa_no; +#define MAX_FIXED_SESSIONS 10 +struct rte_security_session *sec_session_fixed[MAX_FIXED_SESSIONS]; + int create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, struct rte_ipsec_session *ips) @@ -401,6 +405,11 @@ create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, ips->security.ol_flags = sec_cap->ol_flags; ips->security.ctx = sec_ctx; + if (sa_no < MAX_FIXED_SESSIONS) { + sec_session_fixed[sa_no] = + ipsec_get_primary_session(sa)->security.ses; + sa_no++; + } } set_cdev_id: diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 2af9475..e202277 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -263,7 +263,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, */ /* Workers registered */ -#define IPSEC_EVENTMODE_WORKERS2 +#define IPSEC_EVENTMODE_WORKERS3 /* * Event mode worker @@ -423,6 +423,84 @@ ipsec_wrkr_non_burst_int_port_app_mode_inb(struct eh_event_link_info *links, return; } +/* + * Event mode worker + * Operating parameters : non-burst - Tx internal port - driver mode - outbound + */ +extern struct rte_security_session *sec_session_fixed[]; +static void +ipsec_wrkr_non_burst_int_port_drvr_mode_outb(struct eh_event_link_info *links, + uint8_t nb_links) +{ + unsigned int nb_rx = 0; + struct rte_mbuf *pkt; + unsigned int port_id; + struct rte_event ev; + uint32_t lcore_id; + + /* Check if we have links registered for this lcore */ + if (nb_links == 0) { + /* No links registered - exit */ + goto exit; + } + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + RTE_LOG(INFO, IPSEC, + "Launching event mode worker (non-burst - Tx internal port - " + "driver mode - outbound) on lcore %d\n", lcore_id); + + /* We have valid links */ + + /* Check if it's single link */ + if (nb_links != 1) { + RTE_LOG(INFO, IPSEC, + "Multiple links not supported. 
Using first link\n"); + } + + RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id, + links[0].event_port_id); + while (!force_quit) { + /* Read packet from event queues */ + nb_rx = rte_event_dequeue_burst(links[0].eventdev_id, + links[0].event_port_id, + &ev,/* events */ + 1, /* nb_events */ + 0 /* timeout_ticks */); + + if (nb_rx == 0) + continue; + + port_id = ev.queue_id; + pkt = ev.mbuf; + + rte_prefetch0(rte_pktmbuf_mtod(pkt, void *)); + + /* Process packet */ + ipsec_event_pre_forward(pkt, port_id); + + pkt->udata64 = (uint64_t) sec_session_fixed[port_id]; + + /* Mark the packet for Tx security offload */ + pkt->ol_flags |= PKT_TX_SEC_OFFLOAD; + + /* +* Since tx internal port is availab
[dpdk-dev] [PATCH 11/14] examples/ipsec-secgw: add app processing code
From: Lukasz Bartosik Add IPsec application processing code for event mode. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec-secgw.c | 124 ++ examples/ipsec-secgw/ipsec-secgw.h | 81 examples/ipsec-secgw/ipsec.h| 37 +++--- examples/ipsec-secgw/ipsec_worker.c | 242 ++-- examples/ipsec-secgw/ipsec_worker.h | 39 ++ examples/ipsec-secgw/sa.c | 11 -- 6 files changed, 409 insertions(+), 125 deletions(-) create mode 100644 examples/ipsec-secgw/ipsec-secgw.h create mode 100644 examples/ipsec-secgw/ipsec_worker.h diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index c5d95b9..2e7d4d8 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -50,12 +50,11 @@ #include "event_helper.h" #include "ipsec.h" +#include "ipsec_worker.h" #include "parser.h" volatile bool force_quit; -#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 - #define MAX_JUMBO_PKT_LEN 9600 #define MEMPOOL_CACHE_SIZE 256 @@ -70,8 +69,6 @@ volatile bool force_quit; #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -#define NB_SOCKETS 4 - /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET3 @@ -79,8 +76,6 @@ volatile bool force_quit; #define MAX_LCORE_PARAMS 1024 -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid)) - /* * Configurable number of RX/TX ring descriptors */ @@ -89,29 +84,6 @@ volatile bool force_quit; static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT; static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; -#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((a) & 0xff) << 56) | \ - ((uint64_t)((b) & 0xff) << 48) | \ - ((uint64_t)((c) & 0xff) << 40) | \ - ((uint64_t)((d) & 0xff) << 32) | \ - ((uint64_t)((e) & 0xff) << 24) | \ - ((uint64_t)((f) & 0xff) << 16) | \ - ((uint64_t)((g) & 0xff) << 8) | \ - ((uint64_t)(h) & 0xff)) -#else -#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \ - (((uint64_t)((h) & 0xff) << 56) | \ - ((uint64_t)((g) & 0xff) << 48) | \ - ((uint64_t)((f) & 0xff) << 40) | \ - ((uint64_t)((e) & 0xff) << 32) | \ - ((uint64_t)((d) & 0xff) << 24) | \ - ((uint64_t)((c) & 0xff) << 16) | \ - ((uint64_t)((b) & 0xff) << 8) | \ - ((uint64_t)(a) & 0xff)) -#endif -#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0)) - #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \ (addr)->addr_bytes[0], (addr)->addr_bytes[1], \ (addr)->addr_bytes[2], (addr)->addr_bytes[3], \ @@ -123,18 +95,6 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT; #define MTU_TO_FRAMELEN(x) ((x) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) -/* port/source ethernet addr and destination ethernet addr */ -struct ethaddr_info { - uint64_t src, dst; -}; - -struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, - { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } -}; - struct flow_info flow_info_tbl[RTE_MAX_ETHPORTS]; #define CMD_LINE_OPT_CONFIG"config" @@ -192,10 +152,16 @@ static const struct option lgopts[] = { {NULL, 0, 0, 0} }; +struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = { + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) }, + { 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) } +}; + /* mask of enabled ports */ static 
uint32_t enabled_port_mask; static uint64_t enabled_cryptodev_mask = UINT64_MAX; -static uint32_t unprotected_port_mask; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default. */ static uint32_t nb_lcores; @@ -283,8 +249,6 @@ static struct rte_eth_conf port_conf = { }, }; -static struct socket_ctx socket_ctx[NB_SOCKETS]; - /* * Determine is multi-segment support required: * - either frame buffer size is smaller then mtu @@ -2828,47 +2792,10 @@ main(int32_t argc, char **argv) sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads); port_init(portid, req_rx_offloads, req_tx_offloads); - /* Create default ipsec flow for the ethernet device */ - ret = create_default_ipsec_flow(portid, req_rx_offloads); - if (ret) - printf("Cannot create default flow, err=%d, port=%d\n", - ret, portid); } cryptodevs_init(); - /* start ports */ - RTE_E
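As an aside on the ethaddr_tbl that this patch relocates next to the long options, a minimal sketch (little-endian host assumed, mirroring the __BYTES_TO_UINT64/ETHADDR macros the diff moves out of ipsec-secgw.c) of how a destination MAC address is packed into the 64-bit table entry:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Pack six MAC-address bytes into a uint64_t the same way the
 * little-endian branch of __BYTES_TO_UINT64 does (args g and h are zero).
 */
static uint64_t
ethaddr_pack(uint8_t a, uint8_t b, uint8_t c, uint8_t d, uint8_t e, uint8_t f)
{
	return ((uint64_t)f << 40) | ((uint64_t)e << 32) |
	       ((uint64_t)d << 24) | ((uint64_t)c << 16) |
	       ((uint64_t)b << 8) | (uint64_t)a;
}

int main(void)
{
	/* Same value as the first ethaddr_tbl[] entry in the patch */
	uint64_t dst = ethaddr_pack(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a);

	printf("dst = 0x%012" PRIx64 "\n", dst);
	return 0;
}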
[dpdk-dev] [PATCH 08/14] examples/ipsec-secgw: add support for internal ports
From: Lukasz Bartosik Add support for Rx and Tx internal ports. When internal ports are available then a packet can be received from eth port and forwarded to event queue by HW without any software intervention. The same applies to Tx side where a packet sent to an event queue can by forwarded by HW to eth port without any software intervention. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/event_helper.c | 231 examples/ipsec-secgw/event_helper.h | 11 ++ 2 files changed, 195 insertions(+), 47 deletions(-) diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c index a67132a..6549875 100644 --- a/examples/ipsec-secgw/event_helper.c +++ b/examples/ipsec-secgw/event_helper.c @@ -100,6 +100,39 @@ eh_get_eventdev_params(struct eventmode_conf *em_conf, uint8_t eventdev_id) return &(em_conf->eventdev_config[i]); } + +static inline bool +eh_dev_has_rx_internal_port(uint8_t eventdev_id) +{ + int j; + bool flag = true; + + RTE_ETH_FOREACH_DEV(j) { + uint32_t caps = 0; + + rte_event_eth_rx_adapter_caps_get(eventdev_id, j, &caps); + if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) + flag = false; + } + return flag; +} + +static inline bool +eh_dev_has_tx_internal_port(uint8_t eventdev_id) +{ + int j; + bool flag = true; + + RTE_ETH_FOREACH_DEV(j) { + uint32_t caps = 0; + + rte_event_eth_tx_adapter_caps_get(eventdev_id, j, &caps); + if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) + flag = false; + } + return flag; +} + static inline bool eh_dev_has_burst_mode(uint8_t dev_id) { @@ -115,7 +148,9 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) { struct eventdev_params *eventdev_config; struct rte_event_dev_info dev_info; + int lcore_count; int nb_eventdev; + int nb_eth_dev; int i, ret; /* Get the number of event devices */ @@ -126,6 +161,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) return -EINVAL; } + /* Get the number of eth devs */ + nb_eth_dev = rte_eth_dev_count_avail(); + + if (nb_eth_dev == 0) { + EH_LOG_ERR("No eth devices detected"); + return -EINVAL; + } + + /* Get the number of lcores */ + lcore_count = rte_lcore_count(); + for (i = 0; i < nb_eventdev; i++) { /* Get the event dev conf */ @@ -152,11 +198,17 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES; - /* One port is required for eth Rx adapter */ - eventdev_config->nb_eventport -= 1; + /* Check if there are more queues than required */ + if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) { + /* One queue is reserved for Tx */ + eventdev_config->nb_eventqueue = nb_eth_dev + 1; + } - /* One port is reserved for eth Tx adapter */ - eventdev_config->nb_eventport -= 1; + /* Check if there are more ports than required */ + if (eventdev_config->nb_eventport > lcore_count) { + /* One port per lcore is enough */ + eventdev_config->nb_eventport = lcore_count; + } /* Update the number of event devices */ em_conf->nb_eventdev++; @@ -165,6 +217,42 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf) return 0; } +static void +eh_do_capability_check(struct eventmode_conf *em_conf) +{ + struct eventdev_params *eventdev_config; + int all_internal_ports = 1; + uint32_t eventdev_id; + int i; + + for (i = 0; i < em_conf->nb_eventdev; i++) { + + /* Get the event dev conf */ + eventdev_config = &(em_conf->eventdev_config[i]); + eventdev_id = eventdev_config->eventdev_id; + + /* Check if event device has internal port for Rx & Tx */ + if 
(eh_dev_has_rx_internal_port(eventdev_id) && + eh_dev_has_tx_internal_port(eventdev_id)) { + eventdev_config->all_internal_ports = 1; + } else { + all_internal_ports = 0; + } + } + + /* +* If Rx & Tx internal ports are supported by all event devices then +* eth cores won't be required. Override the eth core mask requested +* and decrement number of event queues by one as it won't be needed +* for Tx. +*/ + if (all_internal_ports) { + rte_bi
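A minimal sketch of the capability check this patch relies on, combining the Rx and Tx helpers shown above into one query (the helper name is illustrative, not part of the patch):

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

/* Return true only if every eth port can be served by the event device
 * through an internal (HW) port on both the Rx and the Tx side.
 */
static bool
all_ports_internal(uint8_t eventdev_id)
{
	uint16_t port;

	RTE_ETH_FOREACH_DEV(port) {
		uint32_t rx_caps = 0, tx_caps = 0;

		rte_event_eth_rx_adapter_caps_get(eventdev_id, port, &rx_caps);
		rte_event_eth_tx_adapter_caps_get(eventdev_id, port, &tx_caps);

		if (!(rx_caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) ||
		    !(tx_caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			return false;
	}
	return true;
}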
[dpdk-dev] [PATCH 14/14] examples/ipsec-secgw: add cmd line option for bufs
From: Lukasz Bartosik Add command line option -s which can be used to configure number of buffers in a pool. Default number of buffers is 8192. Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec-secgw.c | 23 +++ 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 76719f2..f8e28d6 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -59,8 +59,6 @@ volatile bool force_quit; #define MEMPOOL_CACHE_SIZE 256 -#define NB_MBUF(32000) - #define CDEV_QUEUE_DESC 2048 #define CDEV_MAP_ENTRIES 16384 #define CDEV_MP_NB_OBJS 1024 @@ -167,6 +165,7 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. */ static uint32_t nb_lcores; static uint32_t single_sa; static uint32_t single_sa_idx; +static uint32_t nb_bufs_in_pool = 8192; /* * RX/TX HW offload capabilities to enable/use on ethernet ports. @@ -1261,6 +1260,7 @@ print_usage(const char *prgname) " [-w REPLAY_WINDOW_SIZE]" " [-e]" " [-a]" + " [-s NUMBER_OF_MBUFS_IN_PKT_POOL]" " -f CONFIG_FILE" " --config (port,queue,lcore)[,(port,queue,lcore)]" " [--single-sa SAIDX]" @@ -1284,6 +1284,7 @@ print_usage(const char *prgname) " size for each SA\n" " -e enables ESN\n" " -a enables SA SQN atomic behaviour\n" + " -s number of mbufs in packet pool (default 8192)\n" " -f CONFIG_FILE: Configuration file\n" " --config (port,queue,lcore): Rx queue configuration\n" " --single-sa SAIDX: Use single SA index for outbound traffic,\n" @@ -1534,7 +1535,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:", + while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:s:", lgopts, &option_index)) != EOF) { switch (opt) { @@ -1568,6 +1569,19 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) cfgfile = optarg; f_present = 1; break; + + case 's': + ret = parse_decimal(optarg); + if (ret < 0) { + printf("Invalid number of buffers in a pool: " + "%s\n", optarg); + print_usage(prgname); + return -1; + } + + nb_bufs_in_pool = ret; + break; + case 'j': ret = parse_decimal(optarg); if (ret < RTE_MBUF_DEFAULT_BUF_SIZE || @@ -2792,11 +2806,12 @@ main(int32_t argc, char **argv) if (socket_ctx[socket_id].mbuf_pool) continue; - pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF); + pool_init(&socket_ctx[socket_id], socket_id, nb_bufs_in_pool); session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); session_priv_pool_init(&socket_ctx[socket_id], socket_id, sess_sz); } + printf("Number of mbufs in packet pool %d\n", nb_bufs_in_pool); RTE_ETH_FOREACH_DEV(portid) { if ((enabled_port_mask & (1 << portid)) == 0) -- 2.7.4
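For context, a minimal sketch of what the new '-s' value feeds into (assuming the pool is created with rte_pktmbuf_pool_create(); the pool name is illustrative):

#include <rte_mbuf.h>

#define MEMPOOL_CACHE_SIZE 256

/* nb_bufs comes from the new '-s' option (default 8192) */
static struct rte_mempool *
create_pkt_pool(uint32_t nb_bufs, int socket_id)
{
	return rte_pktmbuf_pool_create("mbuf_pool", nb_bufs,
				       MEMPOOL_CACHE_SIZE, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       socket_id);
}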
[dpdk-dev] [PATCH 13/14] examples/ipsec-secgw: add app outbound worker
From: Ankur Dwivedi This patch adds the app outbound worker thread. Signed-off-by: Ankur Dwivedi Signed-off-by: Anoob Joseph Signed-off-by: Lukasz Bartosik --- examples/ipsec-secgw/ipsec_worker.c | 193 +++- 1 file changed, 192 insertions(+), 1 deletion(-) diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index e202277..41d2264 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -256,6 +256,101 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, return 0; } +static inline int +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt, + struct rte_event *ev) +{ + struct rte_ipsec_session *sess; + struct sa_ctx *sa_ctx; + struct rte_mbuf *pkt; + uint16_t port_id = 0; + struct ipsec_sa *sa; + enum pkt_type type; + uint32_t sa_idx; + uint8_t *nlp; + + /* Get pkt from event */ + pkt = ev->mbuf; + + /* Check the packet type */ + type = process_ipsec_get_pkt_type(pkt, &nlp); + + switch (type) { + case PKT_TYPE_PLAIN_IPV4: + /* Check if we have a match */ + if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + case PKT_TYPE_PLAIN_IPV6: + /* Check if we have a match */ + if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) { + /* No valid match */ + goto drop_pkt_and_exit; + } + break; + default: + /* +* Only plain IPv4 & IPv6 packets are allowed +* on protected port. Drop the rest. +*/ + RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type); + goto drop_pkt_and_exit; + } + + /* Check if the packet has to be bypassed */ + if (sa_idx == 0) { + port_id = get_route(pkt, rt, type); + if (unlikely(port_id == RTE_MAX_ETHPORTS)) { + /* no match */ + goto drop_pkt_and_exit; + } + /* else, we have a matching route */ + goto send_pkt; + } + + /* Else the packet has to be protected */ + + /* Get SA ctx*/ + sa_ctx = ctx->sa_ctx; + + /* Get SA */ + sa = &(sa_ctx->sa[sa_idx]); + + /* Get IPsec session */ + sess = ipsec_get_primary_session(sa); + + /* Allow only inline protocol for now */ + if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) { + RTE_LOG(ERR, IPSEC, "SA type not supported\n"); + goto drop_pkt_and_exit; + } + + if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) + pkt->udata64 = (uint64_t) sess->security.ses; + + /* Mark the packet for Tx security offload */ + pkt->ol_flags |= PKT_TX_SEC_OFFLOAD; + + /* Get the port to which this pkt need to be submitted */ + port_id = sa->portid; + +send_pkt: + /* Update mac addresses */ + update_mac_addrs(pkt, port_id); + + /* Update the event with the dest port */ + ipsec_event_pre_forward(pkt, port_id); + return 1; + +drop_pkt_and_exit: + RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n"); + rte_pktmbuf_free(pkt); + ev->mbuf = NULL; + return 0; +} + /* * Event mode exposes various operating modes depending on the * capabilities of the event device and the operating mode @@ -263,7 +358,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt, */ /* Workers registered */ -#define IPSEC_EVENTMODE_WORKERS3 +#define IPSEC_EVENTMODE_WORKERS4 /* * Event mode worker @@ -501,6 +596,92 @@ ipsec_wrkr_non_burst_int_port_drvr_mode_outb(struct eh_event_link_info *links, return; } +/* + * Event mode worker + * Operating parameters : non-burst - Tx internal port - app mode - outbound + */ +static void +ipsec_wrkr_non_burst_int_port_app_mode_outb(struct eh_event_link_info *links, + uint8_t nb_links) +{ + struct lcore_conf_ev_tx_int_port_wrkr lconf; + unsigned int nb_rx 
= 0; + struct rte_event ev; + uint32_t lcore_id; + int32_t socket_id; + + /* Check if we have links registered for this lcore */ + if (nb_links == 0) { + /* No links registered - exit */ + goto exit; + } + + /* We have valid links */ + + /* Get core ID */ + lcore_id = rte_lcore_id(); + + /* Get socket ID */ + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* Save routing table */ + lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4; + lconf.rt.rt6_ctx = socket_ctx[
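A minimal sketch of the worker loop that wraps this outbound path (assuming the DPDK 19.11 Tx adapter enqueue signature with the trailing flags argument; helper names are illustrative):

#include <stdbool.h>
#include <rte_eventdev.h>
#include <rte_event_eth_tx_adapter.h>

/* Skeleton of a single-event worker: dequeue, run the outbound
 * processing shown above, then hand the event to the Tx adapter
 * (internal port), which forwards the mbuf to the wire.
 */
static void
outbound_worker_loop(uint8_t ev_id, uint8_t ev_port, volatile bool *quit)
{
	struct rte_event ev;

	while (!*quit) {
		if (rte_event_dequeue_burst(ev_id, ev_port, &ev, 1, 0) == 0)
			continue;

		/* process_ipsec_ev_outbound() would be called here; it
		 * returns 0 when the packet was dropped.
		 */

		rte_event_eth_tx_adapter_enqueue(ev_id, ev_port, &ev, 1, 0);
	}
}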
[dpdk-dev] [PATCH v1 1/6] raw/octeontx2_ep: add build infra and device probe
Add the OCTEON TX2 SDP EP device probe along with the build infrastructure for Make and meson builds. Signed-off-by: Mahipal Challa --- MAINTAINERS| 5 + config/common_base | 5 + doc/guides/rawdevs/index.rst | 1 + doc/guides/rawdevs/octeontx2_ep.rst| 41 +++ drivers/raw/Makefile | 1 + drivers/raw/meson.build| 1 + drivers/raw/octeontx2_ep/Makefile | 40 +++ drivers/raw/octeontx2_ep/meson.build | 6 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 132 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 21 .../rte_rawdev_octeontx2_ep_version.map| 4 + mk/rte.app.mk | 2 + 12 files changed, 259 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 4395d8d..24f1240 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1173,6 +1173,11 @@ M: Vamsi Attunuru F: drivers/raw/octeontx2_dma/ F: doc/guides/rawdevs/octeontx2_dma.rst +Marvell OCTEON TX2 EP +M: Mahipal Challa +F: drivers/raw/octeontx2_ep/ +F: doc/guides/rawdevs/octeontx2_ep.rst + NTB M: Xiaoyun Li M: Jingjing Wu diff --git a/config/common_base b/config/common_base index 7dec7ed..8e7dad2 100644 --- a/config/common_base +++ b/config/common_base @@ -796,6 +796,11 @@ CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV=y CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV=y # +# Compile PMD for octeontx2 EP raw device +# +CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV=y + +# # Compile PMD for NTB raw device # CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=y diff --git a/doc/guides/rawdevs/index.rst b/doc/guides/rawdevs/index.rst index 22bc013..f64ec44 100644 --- a/doc/guides/rawdevs/index.rst +++ b/doc/guides/rawdevs/index.rst @@ -17,3 +17,4 @@ application through rawdev API. ioat ntb octeontx2_dma +octeontx2_ep diff --git a/doc/guides/rawdevs/octeontx2_ep.rst b/doc/guides/rawdevs/octeontx2_ep.rst new file mode 100644 index 000..5f5ed01 --- /dev/null +++ b/doc/guides/rawdevs/octeontx2_ep.rst @@ -0,0 +1,41 @@ +.. SPDX-License-Identifier: BSD-3-Clause +Copyright(c) 2019 Marvell International Ltd. + +Marvell OCTEON TX2 End Point Rawdev Driver +== + +OCTEON TX2 has an internal SDP unit which provides End Point mode of operation +by exposing its IOQs to Host, IOQs are used for packet I/O between Host and +OCTEON TX2. Each OCTEON TX2 SDP PF supports a max of 128 VFs and Each VF is +associated with a set of IOQ pairs. + +Features + + +This OCTEON TX2 End Point mode PMD supports + +#. Packet Input - Host to OCTEON TX2 with direct data instruction mode. + +#. Packet Output - OCTEON TX2 to Host with info pointer mode. + +Config File Options +~~~ + +The following options can be modified in the ``config`` file. + +- ``CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV`` (default ``y``) + + Toggle compilation of the ``lrte_pmd_octeontx2_ep`` driver. + +Initialization +-- + +The number of SDP VFs enabled, can be controlled by setting sysfs +entry `sriov_numvfs` for the corresponding PF driver. + +.. code-block:: console + + echo > /sys/bus/pci/drivers/octeontx2-ep/\:04\:00.0/sriov_numvfs + +Once the required VFs are enabled, to be accessible from DPDK, VFs need to be +bound to vfio-pci driver. 
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile index 0b6d13d..80b043e 100644 --- a/drivers/raw/Makefile +++ b/drivers/raw/Makefile @@ -13,5 +13,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga DIRS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += ioat DIRS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += octeontx2_dma +DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += octeontx2_ep include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build index d7037cd..bb57977 100644 --- a/drivers/raw/meson.build +++ b/drivers/raw/meson.build @@ -4,6 +4,7 @@ drivers = ['dpaa2_cmdif', 'dpaa2_qdma', 'ifpga', 'ioat', 'ntb', 'octeontx2_dma', + 'octeontx2_ep', 'skeleton'] std_deps = ['rawdev'] config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV' diff --git a/drivers/raw/octeontx2_ep/Makefile b/drivers/raw/octeontx2_ep/Makefile new file mode 100644 index 000..8cec6bd --- /dev/null +++ b/drivers/raw/octeontx2_ep/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(C) 2019 Marvell International Ltd. +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# Library name +LIB = librte_rawdev_octeontx2_ep.a + +# Build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2/ +CFLAGS += -I$(RTE_SDK)/drivers/raw/octeontx2_ep/ + +LDLIBS += -lrte_eal +LDLIBS += -lrte_rawdev +LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_mempool +LDLIBS += -lrte_com
[dpdk-dev] [PATCH v1 2/6] raw/octeontx2_ep: add device configuration
Register "dev_configure" API to configure/initialize the SDP VF PCIe devices. Signed-off-by: Mahipal Challa --- doc/guides/rawdevs/octeontx2_ep.rst| 29 ++ drivers/common/octeontx2/hw/otx2_sdp.h | 184 + drivers/common/octeontx2/otx2_common.c | 9 + drivers/common/octeontx2/otx2_common.h | 4 + .../octeontx2/rte_common_octeontx2_version.map | 6 + drivers/raw/octeontx2_ep/Makefile | 3 + drivers/raw/octeontx2_ep/meson.build | 4 +- drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 294 ++ drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h | 11 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 148 +++ drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 434 - drivers/raw/octeontx2_ep/otx2_ep_vf.c | 408 +++ drivers/raw/octeontx2_ep/otx2_ep_vf.h | 10 + 13 files changed, 1542 insertions(+), 2 deletions(-) diff --git a/doc/guides/rawdevs/octeontx2_ep.rst b/doc/guides/rawdevs/octeontx2_ep.rst index 5f5ed01..2507fcf 100644 --- a/doc/guides/rawdevs/octeontx2_ep.rst +++ b/doc/guides/rawdevs/octeontx2_ep.rst @@ -39,3 +39,32 @@ entry `sriov_numvfs` for the corresponding PF driver. Once the required VFs are enabled, to be accessible from DPDK, VFs need to be bound to vfio-pci driver. + +Device Setup + + +The OCTEON TX2 SDP End Point VF devices will need to be bound to a +user-space IO driver for use. The script ``dpdk-devbind.py`` script +included with DPDK can be used to view the state of the devices and to bind +them to a suitable DPDK-supported kernel driver. When querying the status +of the devices, they will appear under the category of "Misc (rawdev) +devices", i.e. the command ``dpdk-devbind.py --status-dev misc`` can be +used to see the state of those devices alone. + +Device Configuration + + +Configuring SDP EP rawdev device is done using the ``rte_rawdev_configure()`` +API, which takes the mempool as parameter. PMD uses this pool to send/receive +packets to/from the HW. + +The following code shows how the device is configured + +.. code-block:: c + + struct sdp_rawdev_info config = {0}; + struct rte_rawdev_info rdev_info = {.dev_private = &config}; + config.enqdeq_mpool = (void *)rte_mempool_create(...); + + rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info); + diff --git a/drivers/common/octeontx2/hw/otx2_sdp.h b/drivers/common/octeontx2/hw/otx2_sdp.h new file mode 100644 index 000..7e03317 --- /dev/null +++ b/drivers/common/octeontx2/hw/otx2_sdp.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#ifndef __OTX2_SDP_HW_H_ +#define __OTX2_SDP_HW_H_ + +/* SDP VF IOQs */ +#define SDP_MIN_RINGS_PER_VF(1) +#define SDP_MAX_RINGS_PER_VF(8) + +/* SDP VF IQ configuration */ +#define SDP_VF_MAX_IQ_DESCRIPTORS (512) +#define SDP_VF_MIN_IQ_DESCRIPTORS (128) + +#define SDP_VF_DB_MIN (1) +#define SDP_VF_DB_TIMEOUT (1) +#define SDP_VF_INTR_THRESHOLD (0x) + +#define SDP_VF_64BYTE_INSTR (64) +#define SDP_VF_32BYTE_INSTR (32) + +/* SDP VF OQ configuration */ +#define SDP_VF_MAX_OQ_DESCRIPTORS (512) +#define SDP_VF_MIN_OQ_DESCRIPTORS (128) +#define SDP_VF_OQ_BUF_SIZE (2048) +#define SDP_VF_OQ_REFIL_THRESHOLD (16) + +#define SDP_VF_OQ_INFOPTR_MODE (1) +#define SDP_VF_OQ_BUFPTR_MODE (0) + +#define SDP_VF_OQ_INTR_PKT (1) +#define SDP_VF_OQ_INTR_TIME (10) +#define SDP_VF_CFG_IO_QUEUESSDP_MAX_RINGS_PER_VF + +/* Wait time in milliseconds for FLR */ +#define SDP_VF_PCI_FLR_WAIT (100) +#define SDP_VF_BUSY_LOOP_COUNT (1) + +#define SDP_VF_MAX_IO_QUEUESSDP_MAX_RINGS_PER_VF +#define SDP_VF_MIN_IO_QUEUESSDP_MIN_RINGS_PER_VF + +/* SDP VF IOQs per rawdev */ +#define SDP_VF_MAX_IOQS_PER_RAWDEV SDP_VF_MAX_IO_QUEUES +#define SDP_VF_DEFAULT_IOQS_PER_RAWDEV SDP_VF_MIN_IO_QUEUES + +/* SDP VF Register definitions */ +#define SDP_VF_RING_OFFSET(0x1ull << 17) + +/* SDP VF IQ Registers */ +#define SDP_VF_R_IN_CONTROL_START (0x1) +#define SDP_VF_R_IN_ENABLE_START (0x10010) +#define SDP_VF_R_IN_INSTR_BADDR_START (0x10020) +#define SDP_VF_R_IN_INSTR_RSIZE_START (0x10030) +#define SDP_VF_R_IN_INSTR_DBELL_START (0x10040) +#define SDP_VF_R_IN_CNTS_START(0x10050) +#define SDP_VF_R_IN_INT_LEVELS_START (0x10060) +#define SDP_VF_R_IN_PKT_CNT_START (0x10080) +#define SDP_VF_R_IN_BYTE_CNT_START(0x10090) + +#define SDP_VF_R_IN_CONTROL(ring) \ + (SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET)) + +#define SDP_VF_R_IN_ENABLE(ring) \ + (SDP_VF_R_IN_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET)) + +#define SDP_VF_R_IN_INSTR_BADDR(ring) \ + (SDP_VF_R_IN_INSTR_BADDR_START
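Expanding the documentation snippet above into a slightly fuller sketch (buffer count and size are illustrative, and the include for struct sdp_rawdev_info is an assumption about how the driver header is exposed):

#include <rte_mempool.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include "otx2_ep_rawdev.h"	/* provides struct sdp_rawdev_info (assumption) */

static int
configure_sdp_ep(uint16_t dev_id)
{
	struct sdp_rawdev_info config = {0};
	struct rte_rawdev_info rdev_info = {.dev_private = &config};

	/* Pool used by the PMD for instruction and output queue buffers;
	 * 4096 x 2048-byte elements are illustrative values.
	 */
	config.enqdeq_mpool = (void *)rte_mempool_create("sdp_ioq_pool",
					4096, 2048, 0, 0,
					NULL, NULL, NULL, NULL,
					rte_socket_id(), 0);
	if (config.enqdeq_mpool == NULL)
		return -1;

	return rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info);
}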
[dpdk-dev] [PATCH v1 4/6] raw/octeontx2_ep: add enqueue operation
Add rawdev enqueue operation for SDP VF devices. Signed-off-by: Mahipal Challa --- doc/guides/rawdevs/octeontx2_ep.rst | 6 + drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 242 ++ drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h | 39 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 1 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 20 +++ drivers/raw/octeontx2_ep/otx2_ep_vf.c | 24 +++ 6 files changed, 332 insertions(+) diff --git a/doc/guides/rawdevs/octeontx2_ep.rst b/doc/guides/rawdevs/octeontx2_ep.rst index 2507fcf..39a7c29 100644 --- a/doc/guides/rawdevs/octeontx2_ep.rst +++ b/doc/guides/rawdevs/octeontx2_ep.rst @@ -68,3 +68,9 @@ The following code shows how the device is configured rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&rdev_info); +Performing Data Transfer + + +To perform data transfer using SDP VF EP rawdev devices use standard +``rte_rawdev_enqueue_buffers()`` and ``rte_rawdev_dequeue_buffers()`` APIs. + diff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c index 584b818..ebbacfb 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c +++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c @@ -403,3 +403,245 @@ return -ENOMEM; } +static inline void +sdp_iqreq_delete(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, uint32_t idx) +{ + uint32_t reqtype; + void *buf; + + buf = iq->req_list[idx].buf; + reqtype = iq->req_list[idx].reqtype; + + switch (reqtype) { + case SDP_REQTYPE_NORESP: + rte_mempool_put(sdpvf->enqdeq_mpool, buf); + otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx); + break; + + case SDP_REQTYPE_NORESP_GATHER: + case SDP_REQTYPE_NONE: + default: + otx2_info("This iqreq mode is not supported:%d", reqtype); + + } + + /* Reset the request list at this index */ + iq->req_list[idx].buf = NULL; + iq->req_list[idx].reqtype = 0; +} + +static inline void +sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf, + uint32_t reqtype) +{ + iq->req_list[iq->host_write_index].buf = buf; + iq->req_list[iq->host_write_index].reqtype = reqtype; + + otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index); + +} + +static void +sdp_flush_iq(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, + uint32_t pending_thresh __rte_unused) +{ + uint32_t instr_processed = 0; + + rte_spinlock_lock(&iq->lock); + + iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq); + while (iq->flush_index != iq->otx_read_index) { + /* Free the IQ data buffer to the pool */ + sdp_iqreq_delete(sdpvf, iq, iq->flush_index); + iq->flush_index = + sdp_incr_index(iq->flush_index, 1, iq->nb_desc); + + instr_processed++; + } + + iq->stats.instr_processed = instr_processed; + rte_atomic64_sub(&iq->instr_pending, instr_processed); + + rte_spinlock_unlock(&iq->lock); +} + +static inline void +sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused, + struct sdp_instr_queue *iq) +{ + otx2_write64(iq->fill_cnt, iq->doorbell_reg); + + /* Make sure doorbell write goes through */ + rte_wmb(); + iq->fill_cnt = 0; + +} + +static inline int +post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd) +{ + uint8_t *iqptr, cmdsize; + + /* This ensures that the read index does not wrap around to +* the same position if queue gets full before OCTEON TX2 could +* fetch any instr. +*/ + if (rte_atomic64_read(&iq->instr_pending) >= + (int32_t)(iq->nb_desc - 1)) { + otx2_err("IQ is full, pending:%ld", +(long)rte_atomic64_read(&iq->instr_pending)); + + return SDP_IQ_SEND_FAILED; + } + + /* Copy cmd into iq */ + cmdsize = ((iq->iqcmd_64B) ? 
64 : 32); + iqptr = iq->base_addr + (cmdsize * iq->host_write_index); + + rte_memcpy(iqptr, iqcmd, cmdsize); + + otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index); + + /* Increment the host write index */ + iq->host_write_index = + sdp_incr_index(iq->host_write_index, 1, iq->nb_desc); + + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data +* is in memory before indicating that the instruction is +* pending. +*/ + rte_wmb(); + rte_atomic64_inc(&iq->instr_pending); + + /* SDP_IQ_SEND_SUCCESS */ + return 0; +} + + +static int +sdp_send_data(struct sdp_device *sdpvf, + struct sdp_instr_queue *iq, void *cmd) +{ + uint32_t ret; + + /* Lock this IQ command queue before posting instruction */ + rte_spinloc
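A minimal sketch of submitting one buffer through the generic rawdev enqueue API that the documentation points to (passing NULL as the context argument is an assumption; the payload layout expected by the SDP firmware is not shown):

#include <string.h>
#include <rte_rawdev.h>

/* Send one data buffer to the EP device; 'data' is treated as opaque. */
static int
sdp_ep_send(uint16_t dev_id, void *data)
{
	struct rte_rawdev_buf buf;
	struct rte_rawdev_buf *bufs[1] = { &buf };

	memset(&buf, 0, sizeof(buf));
	buf.buf_addr = data;

	/* context is left NULL here; the PMD's enqueue callback decides
	 * which instruction queue the command lands on.
	 */
	return rte_rawdev_enqueue_buffers(dev_id, bufs, 1, NULL);
}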
[dpdk-dev] [PATCH v1 3/6] raw/octeontx2_ep: add device uninitialization
Add rawdev close/uninitialize operation for SDP VF devices. Signed-off-by: Mahipal Challa --- drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 111 ++ drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 78 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 8 +++ drivers/raw/octeontx2_ep/otx2_ep_vf.c | 44 4 files changed, 241 insertions(+) diff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c index 8857004..584b818 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c +++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c @@ -21,6 +21,59 @@ #include "otx2_common.h" #include "otx2_ep_enqdeq.h" +static void +sdp_dmazone_free(const struct rte_memzone *mz) +{ + const struct rte_memzone *mz_tmp; + int ret = 0; + + if (mz == NULL) { + otx2_err("Memzone %s : NULL", mz->name); + return; + } + + mz_tmp = rte_memzone_lookup(mz->name); + if (mz_tmp == NULL) { + otx2_err("Memzone %s Not Found", mz->name); + return; + } + + ret = rte_memzone_free(mz); + if (ret) + otx2_err("Memzone free failed : ret = %d", ret); + +} + +/* Free IQ resources */ +int +sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no) +{ + struct sdp_instr_queue *iq; + + iq = sdpvf->instr_queue[iq_no]; + if (iq == NULL) { + otx2_err("Invalid IQ[%d]\n", iq_no); + return -ENOMEM; + } + + rte_free(iq->req_list); + iq->req_list = NULL; + + if (iq->iq_mz) { + sdp_dmazone_free(iq->iq_mz); + iq->iq_mz = NULL; + } + + rte_free(sdpvf->instr_queue[iq_no]); + sdpvf->instr_queue[iq_no] = NULL; + + sdpvf->num_iqs--; + + otx2_info("IQ[%d] is deleted", iq_no); + + return 0; +} + /* IQ initialization */ static int sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no) @@ -126,6 +179,7 @@ return 0; delete_IQ: + sdp_delete_iqs(sdpvf, iq_no); return -ENOMEM; } @@ -139,6 +193,61 @@ rte_atomic64_set(&droq->pkts_pending, 0); } +static void +sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf, + struct sdp_droq *droq) +{ + uint32_t idx; + + for (idx = 0; idx < droq->nb_desc; idx++) { + if (droq->recv_buf_list[idx].buffer) { + rte_mempool_put(sdpvf->enqdeq_mpool, + droq->recv_buf_list[idx].buffer); + + droq->recv_buf_list[idx].buffer = NULL; + } + } + + sdp_droq_reset_indices(droq); +} + +/* Free OQs resources */ +int +sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no) +{ + struct sdp_droq *droq; + + droq = sdpvf->droq[oq_no]; + if (droq == NULL) { + otx2_err("Invalid droq[%d]", oq_no); + return -ENOMEM; + } + + sdp_droq_destroy_ring_buffers(sdpvf, droq); + rte_free(droq->recv_buf_list); + droq->recv_buf_list = NULL; + + if (droq->info_mz) { + sdp_dmazone_free(droq->info_mz); + droq->info_mz = NULL; + } + + if (droq->desc_ring_mz) { + sdp_dmazone_free(droq->desc_ring_mz); + droq->desc_ring_mz = NULL; + } + + memset(droq, 0, SDP_DROQ_SIZE); + + rte_free(sdpvf->droq[oq_no]); + sdpvf->droq[oq_no] = NULL; + + sdpvf->num_oqs--; + + otx2_info("OQ[%d] is deleted", oq_no); + return 0; +} + static int sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf, struct sdp_droq *droq) @@ -290,5 +399,7 @@ return 0; delete_OQ: + sdp_delete_oqs(sdpvf, oq_no); return -ENOMEM; } + diff --git a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c index 5db9b50..2c43d3f 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c +++ b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c @@ -63,6 +63,45 @@ } static int +sdp_vfdev_exit(struct rte_rawdev *rawdev) +{ + struct sdp_device *sdpvf; + uint32_t rawdev_queues, q; + + otx2_info("%s:", __func__); + + sdpvf = (struct sdp_device *)rawdev->dev_private; + + 
sdpvf->fn_list.disable_io_queues(sdpvf); + + rawdev_queues = sdpvf->num_oqs; + for (q = 0; q < rawdev_queues; q++) { + if (sdp_delete_oqs(sdpvf, q)) { + otx2_err("Failed to delete OQ:%d", q); + return -ENOMEM; + } + } + otx2_info("Num OQs:%d freed", sdpvf->num_oqs); + + /* Free the oqbuf_pool */ + rte_mempool_free(sdpvf->enqdeq_mpool); + sdpvf->enqdeq_mpool = NULL; + + otx2_info("Enqdeq_mpool free done"); + + rawdev_queues = sdpvf->num_iqs; + for (q = 0; q < rawdev_queues; q++) { + if (sdp_delete_iqs(sdpvf, q)) { +
[dpdk-dev] [PATCH v1 0/6] OCTEON TX2 End Point Driver
This patchset adds support for OCTEON TX2 end point mode of operation. The driver implementation uses DPDK rawdevice sub-system. Mahipal Challa (6): raw/octeontx2_ep: add build infra and device probe raw/octeontx2_ep: add device configuration raw/octeontx2_ep: add device uninitialization raw/octeontx2_ep: add enqueue operation raw/octeontx2_ep: add dequeue operation raw/octeontx2_ep: add driver self test MAINTAINERS| 5 + config/common_base | 5 + doc/guides/rawdevs/index.rst | 1 + doc/guides/rawdevs/octeontx2_ep.rst| 89 +++ drivers/common/octeontx2/hw/otx2_sdp.h | 184 + drivers/common/octeontx2/otx2_common.c | 9 + drivers/common/octeontx2/otx2_common.h | 4 + .../octeontx2/rte_common_octeontx2_version.map | 6 + drivers/raw/Makefile | 1 + drivers/raw/meson.build| 1 + drivers/raw/octeontx2_ep/Makefile | 44 ++ drivers/raw/octeontx2_ep/meson.build | 9 + drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 846 + drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h | 52 ++ drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 361 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 499 drivers/raw/octeontx2_ep/otx2_ep_test.c| 164 drivers/raw/octeontx2_ep/otx2_ep_vf.c | 476 drivers/raw/octeontx2_ep/otx2_ep_vf.h | 10 + .../rte_rawdev_octeontx2_ep_version.map| 4 + mk/rte.app.mk | 2 + 21 files changed, 2772 insertions(+) create mode 100644 doc/guides/rawdevs/octeontx2_ep.rst create mode 100644 drivers/common/octeontx2/hw/otx2_sdp.h create mode 100644 drivers/raw/octeontx2_ep/Makefile create mode 100644 drivers/raw/octeontx2_ep/meson.build create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_rawdev.c create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_rawdev.h create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_test.c create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_vf.c create mode 100644 drivers/raw/octeontx2_ep/otx2_ep_vf.h create mode 100644 drivers/raw/octeontx2_ep/rte_rawdev_octeontx2_ep_version.map -- 1.8.3.1
[dpdk-dev] [PATCH v1 5/6] raw/octeontx2_ep: add dequeue operation
Add rawdev dequeue operation for SDP VF devices. Signed-off-by: Mahipal Challa --- drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c | 199 ++ drivers/raw/octeontx2_ep/otx2_ep_enqdeq.h | 2 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 1 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 18 ++- 4 files changed, 219 insertions(+), 1 deletion(-) diff --git a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c index ebbacfb..451fcc0 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c +++ b/drivers/raw/octeontx2_ep/otx2_ep_enqdeq.c @@ -260,6 +260,7 @@ rte_mempool_get(sdpvf->enqdeq_mpool, &buf); if (buf == NULL) { otx2_err("OQ buffer alloc failed"); + droq->stats.rx_alloc_failure++; /* sdp_droq_destroy_ring_buffers(droq);*/ return -ENOMEM; } @@ -645,3 +646,201 @@ return SDP_IQ_SEND_FAILED; } +static uint32_t +sdp_droq_refill(struct sdp_device *sdpvf, struct sdp_droq *droq) +{ + struct sdp_droq_desc *desc_ring; + uint32_t desc_refilled = 0; + void *buf = NULL; + + desc_ring = droq->desc_ring; + + while (droq->refill_count && (desc_refilled < droq->nb_desc)) { + /* If a valid buffer exists (happens if there is no dispatch), +* reuse the buffer, else allocate. +*/ + if (droq->recv_buf_list[droq->refill_idx].buffer != NULL) + break; + + rte_mempool_get(sdpvf->enqdeq_mpool, &buf); + /* If a buffer could not be allocated, no point in +* continuing +*/ + if (buf == NULL) { + droq->stats.rx_alloc_failure++; + break; + } + + droq->recv_buf_list[droq->refill_idx].buffer = buf; + desc_ring[droq->refill_idx].buffer_ptr = rte_mem_virt2iova(buf); + + /* Reset any previous values in the length field. */ + droq->info_list[droq->refill_idx].length = 0; + + droq->refill_idx = sdp_incr_index(droq->refill_idx, 1, + droq->nb_desc); + + desc_refilled++; + droq->refill_count--; + + } + + return desc_refilled; +} + +static int +sdp_droq_read_packet(struct sdp_device *sdpvf __rte_unused, +struct sdp_droq *droq, +struct sdp_droq_pkt *droq_pkt) +{ + struct sdp_droq_info *info; + uint32_t total_len = 0; + uint32_t pkt_len = 0; + + info = &droq->info_list[droq->read_idx]; + sdp_swap_8B_data((uint64_t *)&info->length, 1); + if (!info->length) { + otx2_err("OQ info_list->length[%ld]", (long)info->length); + goto oq_read_fail; + } + + /* Deduce the actual data size */ + info->length -= SDP_RH_SIZE; + total_len += (uint32_t)info->length; + + otx2_sdp_dbg("OQ: pkt_len[%ld], buffer_size %d", + (long)info->length, droq->buffer_size); + if (info->length > droq->buffer_size) { + otx2_err("This mode is not supported: pkt_len > buffer_size"); + goto oq_read_fail; + } + + if (info->length <= droq->buffer_size) { + pkt_len = (uint32_t)info->length; + droq_pkt->data = droq->recv_buf_list[droq->read_idx].buffer; + droq_pkt->len = pkt_len; + + droq->recv_buf_list[droq->read_idx].buffer = NULL; + droq->read_idx = sdp_incr_index(droq->read_idx, 1,/* count */ + droq->nb_desc /* max rd idx */); + droq->refill_count++; + + } + + info->length = 0; + + return SDP_OQ_RECV_SUCCESS; + +oq_read_fail: + return SDP_OQ_RECV_FAILED; +} + +static inline uint32_t +sdp_check_droq_pkts(struct sdp_droq *droq, uint32_t burst_size) +{ + uint32_t min_pkts = 0; + uint32_t new_pkts; + uint32_t pkt_count; + + /* Latest available OQ packets */ + pkt_count = rte_read32(droq->pkts_sent_reg); + + /* Newly arrived packets */ + new_pkts = pkt_count - droq->last_pkt_count; + otx2_sdp_dbg("Recvd [%d] new OQ pkts", new_pkts); + + min_pkts = (new_pkts > burst_size) ? 
burst_size : new_pkts; + if (min_pkts) { + rte_atomic64_add(&droq->pkts_pending, min_pkts); + /* Back up the aggregated packet count so far */ + droq->last_pkt_count += min_pkts; + } + + return min_pkts; +} + +/* Check for response arrival from OCTEON TX2 + * returns number of requests completed + */ +int +sdp_rawdev_dequeue(struct rte_rawdev *rawdev, + struct rte_rawdev_buf **buffers, unsigned int count, + rte_rawdev_obj_t context __rte_unuse
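The matching receive-side sketch for this dequeue path (the context argument is left NULL, consistent with the __rte_unused marking in the code above):

#include <rte_rawdev.h>

/* Poll the EP device for up to 'count' completed buffers; returns how
 * many were actually received.
 */
static int
sdp_ep_recv(uint16_t dev_id, struct rte_rawdev_buf **bufs, unsigned int count)
{
	return rte_rawdev_dequeue_buffers(dev_id, bufs, count, NULL);
}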
[dpdk-dev] [PATCH v1 6/6] raw/octeontx2_ep: add driver self test
Add rawdev's selftest feature in SDP VF driver, which verifies the EP mode functionality test. Signed-off-by: Mahipal Challa --- doc/guides/rawdevs/octeontx2_ep.rst | 13 +++ drivers/raw/octeontx2_ep/Makefile | 1 + drivers/raw/octeontx2_ep/meson.build | 1 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.c | 1 + drivers/raw/octeontx2_ep/otx2_ep_rawdev.h | 2 + drivers/raw/octeontx2_ep/otx2_ep_test.c | 164 ++ 6 files changed, 182 insertions(+) diff --git a/doc/guides/rawdevs/octeontx2_ep.rst b/doc/guides/rawdevs/octeontx2_ep.rst index 39a7c29..bbcf530 100644 --- a/doc/guides/rawdevs/octeontx2_ep.rst +++ b/doc/guides/rawdevs/octeontx2_ep.rst @@ -74,3 +74,16 @@ Performing Data Transfer To perform data transfer using SDP VF EP rawdev devices use standard ``rte_rawdev_enqueue_buffers()`` and ``rte_rawdev_dequeue_buffers()`` APIs. +Self test +- + +On EAL initialization, SDP VF devices will be probed and populated into the +raw devices. The rawdev ID of the device can be obtained using + +* Invoke ``rte_rawdev_get_dev_id("SDPEP:x")`` from the test application + where x is the VF device's bus id specified in "bus:device.func"(BDF) + format. Use this index for further rawdev function calls. + +* The driver's selftest rawdev API can be used to verify the SDP EP mode + functional tests which can send/receive the raw data packets to/from the + EP device. diff --git a/drivers/raw/octeontx2_ep/Makefile b/drivers/raw/octeontx2_ep/Makefile index 02853fb..44fdf89 100644 --- a/drivers/raw/octeontx2_ep/Makefile +++ b/drivers/raw/octeontx2_ep/Makefile @@ -37,6 +37,7 @@ LIBABIVER := 1 # SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_rawdev.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_enqdeq.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_test.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += otx2_ep_vf.c diff --git a/drivers/raw/octeontx2_ep/meson.build b/drivers/raw/octeontx2_ep/meson.build index 99e6c6d..0e6338f 100644 --- a/drivers/raw/octeontx2_ep/meson.build +++ b/drivers/raw/octeontx2_ep/meson.build @@ -5,4 +5,5 @@ deps += ['bus_pci', 'common_octeontx2', 'rawdev'] sources = files('otx2_ep_rawdev.c', 'otx2_ep_enqdeq.c', + 'otx2_ep_test.c', 'otx2_ep_vf.c') diff --git a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c index ddb208d..c5c0cf3 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c +++ b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.c @@ -253,6 +253,7 @@ .dev_close = sdp_rawdev_close, .enqueue_bufs = sdp_rawdev_enqueue, .dequeue_bufs = sdp_rawdev_dequeue, + .dev_selftest = sdp_rawdev_selftest, }; static int diff --git a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h index a77cbab..dab2fb7 100644 --- a/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h +++ b/drivers/raw/octeontx2_ep/otx2_ep_rawdev.h @@ -494,4 +494,6 @@ int sdp_rawdev_enqueue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, int sdp_rawdev_dequeue(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, unsigned int count, rte_rawdev_obj_t context); +int sdp_rawdev_selftest(uint16_t dev_id); + #endif /* _OTX2_EP_RAWDEV_H_ */ diff --git a/drivers/raw/octeontx2_ep/otx2_ep_test.c b/drivers/raw/octeontx2_ep/otx2_ep_test.c new file mode 100644 index 000..96fedb5 --- /dev/null +++ b/drivers/raw/octeontx2_ep/otx2_ep_test.c @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(C) 2019 Marvell International Ltd. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "otx2_common.h" +#include "otx2_ep_rawdev.h" + +#define SDP_IOQ_NUM_BUFS (4 * 1024) +#define SDP_IOQ_BUF_SIZE (2 * 1024) + +#define SDP_TEST_PKT_FSZ (0) +#define SDP_TEST_PKT_SIZE (1024) + +static int +sdp_validate_data(struct sdp_droq_pkt *oq_pkt, uint8_t *iq_pkt, + uint32_t pkt_len) +{ + if (!oq_pkt) + return -EINVAL; + + if (pkt_len != oq_pkt->len) { + otx2_err("Invalid packet length"); + return -EINVAL; + } + + if (memcmp(oq_pkt->data, iq_pkt, pkt_len) != 0) { + otx2_err("Data validation failed"); + return -EINVAL; + } + otx2_sdp_dbg("Data validation successful"); + + return 0; +} + +static void +sdp_ioq_buffer_fill(uint8_t *addr, uint32_t len) +{ + uint32_t idx; + + memset(addr, 0, len); + + for (idx = 0; idx < len; idx++) + addr[idx] = idx; +} + +static struct rte_mempool* +sdp_ioq_mempool_create(void) +{ + struct rte_mempool *mpool; + + mpool = rte_mempool_create("ioqbuf_pool", + SDP_IOQ_NUM_BUFS /*num elt*/, + SDP_IOQ_BUF
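Putting the documentation above into code, a minimal sketch of invoking the self test from an application (the device name string is an illustrative placeholder for a real VF BDF):

#include <rte_rawdev.h>

static int
run_sdp_selftest(void)
{
	uint16_t dev_id;

	/* Device name format is "SDPEP:x" where x is the VF's BDF */
	dev_id = rte_rawdev_get_dev_id("SDPEP:02:00.1");

	return rte_rawdev_selftest(dev_id);
}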
Re: [dpdk-dev] [dpdk-stable] [PATCH] net/bonding: do not inherit slave device configuration
On 2019-11-19 07:40, Andrew Rybchenko wrote: > On 11/19/19 3:18 PM, Ferruh Yigit wrote: >> On 11/19/2019 9:03 AM, Andrew Rybchenko wrote: >>> Bonding device should control bonded devices configuration. >>> >>> Also avoid usage of slave's data->dev_conf. >>> >>> Fixes: 2efb58cbab6e ("bond: new link bonding library") >>> Cc: sta...@dpdk.org >>> >>> Signed-off-by: Andrew Rybchenko >>> --- >>> drivers/net/bonding/rte_eth_bond_pmd.c | 24 >>> 1 file changed, 12 insertions(+), 12 deletions(-) >>> >>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c >>> index 707a0f3cdd..4f0e83205d 100644 >>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c >>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c >>> @@ -1679,6 +1679,7 @@ int >>> slave_configure(struct rte_eth_dev *bonded_eth_dev, >>>struct rte_eth_dev *slave_eth_dev) >>> { >>> + struct rte_eth_conf dev_conf; >>>struct bond_rx_queue *bd_rx_q; >>>struct bond_tx_queue *bd_tx_q; >>>uint16_t nb_rx_queues; >>> @@ -1693,34 +1694,34 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, >>>/* Stop slave */ >>>rte_eth_dev_stop(slave_eth_dev->data->port_id); >>> >>> + memset(&dev_conf, 0, sizeof(dev_conf)); >>> + >>>/* Enable interrupts on slave device if supported */ >>>if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) >>> - slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; >>> + dev_conf.intr_conf.lsc = 1; >> I assume the original intention is making incremental changes to the existing >> slave configuration, if so we should copy the 'slave_eth_dev->data->dev_conf' to >> 'dev_conf' before start updating it. > > The problem is that I don't understand how incremental changes > happen. It simply looks wrong or I don't understand something. > It looks like it is the only place in bonding where slave configuration > is done. > I understand your confusion. Yes, it certainly looks like slave_configure() is doing things wrong by directly modifying the slave's data->dev_conf. If rte_eth_dev_configure() fails, the changes made do get rolled back and become visible anyway despite the device having failed to meet that configuration. rte_eth_dev_configure() handles the rollback, but can't do anything in this case because it doesn't know the device was directly modified. You should make a copy of the dev_conf instead of starting from scratch. There are other capabilities in there that bonding doesn't care about but the application might.
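A sketch of the suggested approach, starting from a copy of the slave's current configuration and modifying only the copy before passing it to rte_eth_dev_configure() (illustrative, not the actual fix):

#include <string.h>
#include <rte_ethdev.h>

static int
slave_reconfigure(struct rte_eth_dev *slave_eth_dev,
		  uint16_t nb_rx_q, uint16_t nb_tx_q)
{
	struct rte_eth_conf dev_conf;

	/* Start from what the slave already has instead of zeroing it,
	 * so capabilities bonding does not care about are preserved.
	 */
	memcpy(&dev_conf, &slave_eth_dev->data->dev_conf, sizeof(dev_conf));

	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		dev_conf.intr_conf.lsc = 1;

	return rte_eth_dev_configure(slave_eth_dev->data->port_id,
				     nb_rx_q, nb_tx_q, &dev_conf);
}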
[dpdk-dev] KNI interface vEtho flaps
Hi, I am running the KNI example. Once I run KNI, a virtual interface is created and I assign an IP address to it. After a few minutes the IP address is gone (I assume the interface flapped). This issue occurs at random. Can anyone help me debug this further? Thanks, Prashanth
Re: [dpdk-dev] [PATCH 1/3] ethdev: add RSS hash level
Hi Ajit, > This patch adds ability to configure RSS hash level in hardware. > This feature will allow an application to select RSS hash calculation > on outer or inner headers for tunneled packets. > > Signed-off-by: Ajit Khaparde > --- > lib/librte_ethdev/rte_ethdev.h | 27 +++ > 1 file changed, 27 insertions(+) > > diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h > index 18a9defc2..5189bdbab 100644 > --- a/lib/librte_ethdev/rte_ethdev.h > +++ b/lib/librte_ethdev/rte_ethdev.h > @@ -444,11 +444,35 @@ struct rte_vlan_filter_conf { > * The *rss_hf* field of the *rss_conf* structure indicates the different > * types of IPv4/IPv6 packets to which the RSS hashing must be applied. > * Supplying an *rss_hf* equal to zero disables the RSS feature. > + * > + * The *rss_level* field of the *rss_conf* structure indicates the > + * Packet encapsulation level RSS hash @p types apply to. > + * > + * - @p 0 requests the default behavior. Depending on the packet > + * type, it can mean outermost, innermost, anything in between or > + * even no RSS. > + * > + * It basically stands for the innermost encapsulation level RSS > + * can be performed on according to PMD and device capabilities. > + * > + * - @p 1 requests RSS to be performed on the outermost packet > + * encapsulation level. > + * > + * - @p 2 and subsequent values request RSS to be performed on the > + * specified inner packet encapsulation level, from outermost to > + * innermost (lower to higher values). If it is just to add ability to do rss over inner header, then do we really need all this concept of 'levels'? Might be just: #define DEV_RX_OFFLOAD_RSS_HASH 0x0008 +#define DEV_RX_OFFLOAD_RSS_HASH_INNER 0x0010 and uint8_t in rte_eth_rss_conf (between key_len and rss_hf) to indicate inner/outer? > + * > + * Support for values other than @p 0 is dependent on the underlying > + * hardware in use. > + * > + * Requesting a specific RSS level on unrecognized traffic results > + * in undefined behavior. > */ > struct rte_eth_rss_conf { > uint8_t *rss_key;/**< If not NULL, 40-byte hash key. */ > uint8_t rss_key_len; /**< hash key length in bytes. */ > uint64_t rss_hf; /**< Hash functions to apply - see below. */ > + uint32_t rss_level; /**< RSS hash level */ > }; > > /* > @@ -599,6 +623,8 @@ rte_eth_rss_hf_refine(uint64_t rss_hf) > ETH_RSS_GENEVE | \ > ETH_RSS_NVGRE) > > +#define ETH_RSS_LEVEL_DEFAULT0 > + > /* > * Definitions used for redirection table entry size. > * Some RSS RETA sizes may not be supported by some drivers, check the > @@ -1103,6 +1129,7 @@ struct rte_eth_conf { > #define DEV_RX_OFFLOAD_SCTP_CKSUM0x0002 > #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x0004 > #define DEV_RX_OFFLOAD_RSS_HASH 0x0008 > +#define DEV_RX_OFFLOAD_RSS_LEVEL 0x0010 > > #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \ >DEV_RX_OFFLOAD_UDP_CKSUM | \ > -- > 2.21.0 (Apple Git-122.2)
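For illustration, how an application would ask for inner-header RSS under the patch's proposed semantics (this assumes the new rss_level field is applied; with the alternative suggested above it would instead be an extra DEV_RX_OFFLOAD_* flag):

#include <rte_ethdev.h>

/* Configure a port to hash on the inner headers of tunneled traffic
 * (level 2 = first inner encapsulation level in the proposed scheme).
 */
static int
enable_inner_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {0};

	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP;
	conf.rx_adv_conf.rss_conf.rss_level = 2; /* field added by this patch */

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}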
Re: [dpdk-dev] Admin Queue ENA
Thanks a lot Michal. Will follow approach that you have suggested. Also I see that in case if TSO is enabled we set, 336 /* this param needed only for TSO */ 337 ena_meta->l3_outer_hdr_len = 0; 338 ena_meta->l3_outer_hdr_offset = 0; So even if TSO is enabled should these values be zero. Thanks, Param. On Wed, Dec 4, 2019 at 7:24 PM Michał Krawczyk wrote: > Hi Param, > > Adding atomic operations to setting/clearing comp ctxt won't help, as > there is no race there. The admin queue is designed this way, that > only single completion context can be held, so you should serialize > access to the rte_eth_stats_get(). > If you won't do that, the 2nd thread will try to hold already occupied > context and this will result in disabling admin queue by the ena > communication layer - you won't be able to send further admin > commands. > That's intended behavior and it is caused because you are trying to > get the context with the occupied flag being set to true. Adding > atomic operations there won't change anything, as there will still be > a race between the thread that is waiting for the completion (occupied > flag already send to true) and another thread, that is trying to send > the same command using the same context (can't set occupied to true, > as it's already true) - that should never happen. > > Without totally reworking ena_com admin queue design, we could add > lock in ena_stats_get() - but that'll cause unnecessary locking in all > of the applications that are using it from the main lcore context and > as your design seems to be unique by doing it from multiple threads, > maybe you could add a lock to your calls to the rte_eth_stats_get()? > > Another solution might be using xstats API, which should let you to > get statistics from multiple threads as it's not using admin queue for > that - all stats are being counter internally in the PMD. > > Thanks, > Michal > > > pt., 29 lis 2019 o 13:01 kumaraparameshwaran rathinavel > napisał(a): > > > > Hi Michał, > > > > Thanks for getting back on this. > > > > In our design we are using multiple cores requesting for > rte_eth_stats_get, it is not from one process and hence not serialized. > Since in our design this is not serialized, and hence in get_comp_ctxt() > checking for occupied flag and comp_ctxt_release() are not done atomically > which is causing this issue. Please let me know if my understanding is > correct, so that I will fix the application in such a way that it is done > from one process and not multiple. > > > > Thanks, > > Param. > > > > On Thu, Nov 28, 2019 at 6:44 PM Michał Krawczyk wrote: > >> > >> Hi Param, > >> > >> first of all - you are using very old ena_com. This code comes from > >> the DPDK version before v18.08. If you have any doubts, please check > >> the newer version of the driver and DPDK as the potential bug could be > >> already fixed there. > >> > >> Anyway, if you will look at the function get_comp_ctxt() which is > >> called by __ena_com_submit_admin_cmd() to get the completion context, > >> there is a check for the context if it's not occupied - in case it is > >> (which will be true until comp_ctxt_release() will clear it), the new > >> command using the same context cannot be used. So there shouldn't be > >> two consumers using the same completion contexts. > >> > >> In addition, drivers that are using ena_com are sending admin commands > >> one at a time during the init, so there shouldn't be even 2 commands > >> at a time. 
The only exception is ena_com_get_dev_basic_stats(), which > >> is called from rte_eth_stats_get() context - but if you consider DPDK > >> application, it should use it on the management lcore after init, so > >> it'll also be serialized. > >> > >> Thanks, > >> Michal > >> > >> > >> > >> pt., 8 lis 2019 o 07:02 kumaraparameshwaran rathinavel > >> napisał(a): > >> > > >> > Hi Michał, > >> > > >> > Please look at the below function, > >> > > >> > static int > >> > ena_com_wait_and_process_admin_cq_polling( > >> > struct ena_comp_ctx *comp_ctx, > >> > struct ena_com_admin_queue *admin_queue) > >> > { > >> > unsigned long flags = 0; > >> > u64 start_time; > >> > int ret; > >> > > >> > start_time = ENA_GET_SYSTEM_USECS(); > >> > > >> > while (comp_ctx->status == ENA_CMD_SUBMITTED) { > >> > if ((ENA_GET_SYSTEM_USECS() - start_time) > > >> > ADMIN_CMD_TIMEOUT_US) { > >> > ena_trc_err("Wait for completion (polling) timeout\n"); > >> > /* ENA didn't have any completion */ > >> > ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); > >> > admin_queue->stats.no_completion++; > >> > admin_queue->running_state = false; > >> > ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); > >> > > >> > ret = ENA_COM_TIMER_EXPIRED; > >> > goto err; > >> > } > >> > > >> > ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); > >> >
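A sketch of the application-side serialization suggested above (one process-wide lock around the basic stats call; switching to the xstats API would avoid the admin queue altogether):

#include <rte_ethdev.h>
#include <rte_spinlock.h>

static rte_spinlock_t stats_lock = RTE_SPINLOCK_INITIALIZER;

/* Safe to call from multiple lcores: only one admin command for basic
 * stats is outstanding at any time.
 */
static int
stats_get_serialized(uint16_t port_id, struct rte_eth_stats *stats)
{
	int ret;

	rte_spinlock_lock(&stats_lock);
	ret = rte_eth_stats_get(port_id, stats);
	rte_spinlock_unlock(&stats_lock);

	return ret;
}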
Re: [dpdk-dev] [PATCH v2 04/17] net/ionic: register and initialize the adapter
> On 2 Dec 2019, at 17:09, Ferruh Yigit wrote: > >> + * There is no room in struct rte_pci_driver to keep a reference >> + * to the adapter, using a static list for the time being. >> + */ >> +static LIST_HEAD(ionic_pci_adapters_list, ionic_adapter) ionic_pci_adapters >> = >> +LIST_HEAD_INITIALIZER(ionic_pci_adapters); > > Why this list is used? Will holding the reference in the private data help? A pci_dev is tied to an adapter, which can be tied to multiple LIFs (logical interfaces); an eth_dev is created with rte_eth_dev_create for each LIF. The reason we keep the adapters list is, for example, to handle eth_ionic_pci_remove, which is called on a pci_dev: we need the adapter with its list of LIFs (eth_devs) in order to destroy them. Btw, all other comments have been fixed; a new patch-set is coming. Thank you Alfredo
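A sketch of the lookup such a static list enables on PCI removal (structure and field names are illustrative, not the actual driver code):

#include <sys/queue.h>
#include <rte_bus_pci.h>

struct adapter_entry {				/* illustrative */
	LIST_ENTRY(adapter_entry) next;
	struct rte_pci_device *pci_dev;
	/* ... list of LIFs / eth_devs owned by this adapter ... */
};

static LIST_HEAD(, adapter_entry) adapters =
	LIST_HEAD_INITIALIZER(adapters);

/* Find the adapter that owns a given pci_dev so its LIFs (eth_devs)
 * can be destroyed when the PCI device is removed.
 */
static struct adapter_entry *
adapter_from_pci(const struct rte_pci_device *pci_dev)
{
	struct adapter_entry *adapter;

	LIST_FOREACH(adapter, &adapters, next)
		if (adapter->pci_dev == pci_dev)
			return adapter;

	return NULL;
}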
Re: [dpdk-dev] [PATCH] net/i40e: fix flow control broken
> -Original Message- > From: Sun, GuinanX > Sent: Friday, December 6, 2019 11:41 PM > To: dev@dpdk.org > Cc: Xing, Beilei ; Zhang, Qi Z ; > Yang, Qiming ; Sun, GuinanX > ; sta...@dpdk.org > Subject: [PATCH] net/i40e: fix flow control broken > > Repeat switching flow control on or off during receiving traffic, testpmd > reports > "failed to switch Tx queue occurs" after quit. > The patch fixes the issue. Please explain more about the root cause and the fix. > > Fixes: f53577f06925 ("i40e: support flow control") > Cc: sta...@dpdk.org > > Signed-off-by: Guinan Sun > --- > drivers/net/i40e/i40e_ethdev.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c > index 5999c964b..5507f6c39 100644 > --- a/drivers/net/i40e/i40e_ethdev.c > +++ b/drivers/net/i40e/i40e_ethdev.c > @@ -53,7 +53,7 @@ > > /* Wait count and interval */ > #define I40E_CHK_Q_ENA_COUNT 1000 > -#define I40E_CHK_Q_ENA_INTERVAL_US 1000 > +#define I40E_CHK_Q_ENA_INTERVAL_US 5 > > /* Maximun number of VSI */ > #define I40E_MAX_NUM_VSIS (384UL) > -- > 2.17.1
[dpdk-dev] [PATCH v2] net/pcap: truncate packet if it is too large
From: Zhike Wang Previously a large packet would be dropped; now it is kept by truncating it instead. Signed-off-by: Zhike Wang --- drivers/net/pcap/rte_eth_pcap.c | 16 ++-- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index aa7ef6f..b4c79d1 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -313,7 +313,7 @@ struct pmd_devargs_all { struct pcap_pkthdr header; pcap_dumper_t *dumper; unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN]; - size_t len; + size_t len, caplen; pp = rte_eth_devices[dumper_q->port_id].process_private; dumper = pp->tx_dumper[dumper_q->queue_id]; @@ -325,28 +325,24 @@ struct pmd_devargs_all { * dumper */ for (i = 0; i < nb_pkts; i++) { mbuf = bufs[i]; - len = rte_pktmbuf_pkt_len(mbuf); + len = caplen = rte_pktmbuf_pkt_len(mbuf); if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) && len > sizeof(temp_data))) { - PMD_LOG(ERR, - "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).", - len, sizeof(temp_data)); - rte_pktmbuf_free(mbuf); - continue; + caplen = sizeof(temp_data); } calculate_timestamp(&header.ts); header.len = len; - header.caplen = header.len; + header.caplen = caplen; /* rte_pktmbuf_read() returns a pointer to the data directly * in the mbuf (when the mbuf is contiguous) or, otherwise, * a pointer to temp_data after copying into it. */ pcap_dump((u_char *)dumper, &header, - rte_pktmbuf_read(mbuf, 0, len, temp_data)); + rte_pktmbuf_read(mbuf, 0, caplen, temp_data)); num_tx++; - tx_bytes += len; + tx_bytes += caplen; rte_pktmbuf_free(mbuf); } -- 1.8.3.1
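A standalone sketch of the truncation logic for reference (the scratch buffer is the RTE_ETH_PCAP_SNAPLEN-sized temp_data; rte_pktmbuf_read() only copies into it for non-contiguous mbufs):

#include <pcap.h>
#include <rte_mbuf.h>

/* Dump one mbuf to a pcap dumper, capping the captured length at the
 * scratch buffer size instead of dropping oversized multi-segment packets.
 */
static void
dump_mbuf(pcap_dumper_t *dumper, struct rte_mbuf *mbuf,
	  unsigned char *temp_data, size_t temp_size,
	  struct pcap_pkthdr *header)
{
	uint32_t len = rte_pktmbuf_pkt_len(mbuf);
	uint32_t caplen = len;

	if (!rte_pktmbuf_is_contiguous(mbuf) && len > temp_size)
		caplen = temp_size;

	header->len = len;	/* original length on the wire */
	header->caplen = caplen;	/* bytes actually stored */

	pcap_dump((u_char *)dumper, header,
		  rte_pktmbuf_read(mbuf, 0, caplen, temp_data));
}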
Re: [dpdk-dev] [DPDK] net/virtio: packed ring notification data feature support
> -Original Message- > From: dev On Behalf Of Cheng Jiang > Sent: Wednesday, December 04, 2019 11:03 PM > To: dev@dpdk.org > Cc: maxime.coque...@redhat.com; Bie, Tiwei ; Wang, > Zhihong ; Jiang, Cheng1 > Subject: [dpdk-dev] [DPDK] net/virtio: packed ring notification data > feature support > > This patch supports the feature that the driver passes extra data > (besides identifying the virtqueue) in its device notifications. > > Signed-off-by: Cheng Jiang > --- > drivers/net/virtio/virtio_ethdev.h | 3 ++- > drivers/net/virtio/virtio_pci.c| 15 ++- > drivers/net/virtio/virtio_pci.h| 6 ++ > 3 files changed, 22 insertions(+), 2 deletions(-) > > diff --git a/drivers/net/virtio/virtio_ethdev.h > b/drivers/net/virtio/virtio_ethdev.h > index a10111758..cd8947656 100644 > --- a/drivers/net/virtio/virtio_ethdev.h > +++ b/drivers/net/virtio/virtio_ethdev.h > @@ -36,7 +36,8 @@ >1ULL << VIRTIO_F_IN_ORDER| \ >1ULL << VIRTIO_F_RING_PACKED | \ >1ULL << VIRTIO_F_IOMMU_PLATFORM | \ > - 1ULL << VIRTIO_F_ORDER_PLATFORM) > + 1ULL << VIRTIO_F_ORDER_PLATFORM | \ > + 1ULL << VIRTIO_F_NOTIFICATION_DATA) > > #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \ > (VIRTIO_PMD_DEFAULT_GUEST_FEATURES |\ > diff --git a/drivers/net/virtio/virtio_pci.c > b/drivers/net/virtio/virtio_pci.c > index 4468e89cb..2462a7dab 100644 > --- a/drivers/net/virtio/virtio_pci.c > +++ b/drivers/net/virtio/virtio_pci.c > @@ -418,7 +418,20 @@ modern_del_queue(struct virtio_hw *hw, struct > virtqueue *vq) > static void > modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue > *vq) > { Hi Cheng, hw pointer will be used in notify function, please remove rte_unused attribute. Thanks, Marvin > - rte_write16(vq->vq_queue_index, vq->notify_addr); > + uint32_t notify_data; > + > + if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) { > + rte_write16(vq->vq_queue_index, vq->notify_addr); > + return; > + } > + > + if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) > + notify_data = uint32_t)vq->vq_packed.used_wrap_counter << > 15) | > + vq->vq_avail_idx) << 16) | vq->vq_queue_index; > + else > + notify_data = ((uint32_t)vq->vq_avail_idx << 16) | > + vq->vq_queue_index; > + rte_write32(notify_data, vq->notify_addr); > } > > const struct virtio_pci_ops modern_ops = { > diff --git a/drivers/net/virtio/virtio_pci.h > b/drivers/net/virtio/virtio_pci.h > index a38cb45ad..7433d2f08 100644 > --- a/drivers/net/virtio/virtio_pci.h > +++ b/drivers/net/virtio/virtio_pci.h > @@ -135,6 +135,12 @@ struct virtnet_ctl; > */ > #define VIRTIO_F_ORDER_PLATFORM 36 > > +/* > + * This feature indicates that the driver passes extra data (besides > + * identifying the virtqueue) in its device notifications. > + */ > +#define VIRTIO_F_NOTIFICATION_DATA 38 > + > /* The Guest publishes the used index for which it expects an interrupt > * at the end of the avail ring. Host should ignore the avail->flags > field. */ > /* The Host publishes the avail index for which it expects a kick > -- > 2.17.1
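For reference, a sketch of how the 32-bit notification word is laid out for a packed ring under VIRTIO_F_NOTIFICATION_DATA, mirroring the hunk above (queue index in the low 16 bits, next avail index and wrap counter in the high 16):

#include <stdint.h>

/* Build the value written to the notify register for a packed queue:
 * bits 0..15  - virtqueue index
 * bits 16..30 - next available descriptor index
 * bit  31     - avail wrap counter
 */
static uint32_t
packed_notify_data(uint16_t queue_index, uint16_t avail_idx,
		   uint16_t wrap_counter)
{
	return ((((uint32_t)wrap_counter << 15) | avail_idx) << 16) |
		queue_index;
}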
Re: [dpdk-dev] [PATCH v3] build: add dockerfile for building docker image
> -Original Message- > From: Halim, Abdul > Sent: Friday, December 6, 2019 19:13 > To: Yasufumi Ogawa ; Ruifeng Wang (Arm > Technology China) ; dev@dpdk.org > Cc: Kinsella, Ray ; nd ; Richardson, > Bruce > Subject: RE: [dpdk-dev] [PATCH v3] build: add dockerfile for building docker > image > > > > > -Original Message- > > From: Yasufumi Ogawa > > Sent: Thursday, December 5, 2019 7:52 PM > > To: Ruifeng Wang (Arm Technology China) ; > Halim, > > Abdul ; dev@dpdk.org > > Cc: Kinsella, Ray ; nd > > Subject: Re: [dpdk-dev] [PATCH v3] build: add dockerfile for building > > docker image > > > > On 2019/12/05 23:13, Ruifeng Wang (Arm Technology China) wrote: > > > > > >> -Original Message- > > >> From: dev On Behalf Of Abdul Halim > > >> Sent: Tuesday, December 3, 2019 19:42 > > >> To: dev@dpdk.org > > >> Cc: ray.kinse...@intel.com; yasufu...@gmail.com; Abdul Halim > > >> > > >> Subject: [dpdk-dev] [PATCH v3] build: add dockerfile for building > > >> docker image > > >> > > >> Adding a Dockerfile with Ubuntu bionic base image to build dpdk as > > >> shared library. This docker image could be used as base image to > > >> build and run dpdk applications in containers. > > >> > > >> Signed-off-by: Abdul Halim > > >> > > [...] > > >> diff --git a/extras/README.md b/extras/README.md new file mode > > 100644 > > >> index 000..f38d7f1 > > >> --- /dev/null > > >> +++ b/extras/README.md > > >> @@ -0,0 +1,52 @@ > > >> +# Build DPDK Docker image > > >> + > > >> +To build a docker image run the following command from dpdk root > > >> directory. > > >> + > > >> +``` > > >> +DOCKER_TAG="dpdk" > > >> +docker build -t ${DOCKER_TAG} -f extras/Dockerfile.bionic . > > >> +``` > > >> + > > >> +# Example of how to use this dpdk library image > > >> + > > >> +The following steps shows how to use the dpdk shared library > > >> +container to build and run a dpdk application without having to > > >> +build dpdk library for each application. > > >> + > > >> +## Create a dpdk sample app docker file with 'dpdk' as the base > > >> +image > > >> + > > >> +Create a docker file to build the dpdk helloworld application. > > >> +Since, we are creating a docker file for dpdk helloworld app we > > >> +need to add the dpdk source files, thus create the following > > >> +docker file in dpdk root > > >> directory. > > >> + > > >> +``` > > >> +cat << EOF > Dockerfile.dpdkSampleApp FROM dpdk > > >> + > > >> +ADD . /opt/dpdk > > >> + > > >> +WORKDIR /opt/dpdk/examples/helloworld RUN make && cp > > >> +build/helloworld-shared /usr/local/bin/helloworld EOF ``` > > >> + > > >> +## Build sample app docker image > > >> + > > >> +``` > > >> +DOCKERAPP_TAG="dpdk-helloworld" > > >> +docker build -t ${DOCKERAPP_TAG} -f Dockerfile.dpdkSampleApp . > > >> +``` > > > > > > Hi Abdul, > > > > > > I tried the steps on AArch64 platform, and hit error as below: > > > > > > $ sudo docker build -t ${DOCKERAPP_TAG} -f Dockerfile.dpdkSampleApp . > > > Sending build context to Docker daemon 2.55GB > > > Step 1/4 : FROM dpdk > > > ---> 955448007987 > > > Step 2/4 : ADD . /opt/dpdk > > > ---> d8b58019a7e2 > > > Step 3/4 : WORKDIR /opt/dpdk/examples/helloworld > > > ---> Running in 14fc89f7d3cd > > > Removing intermediate container 14fc89f7d3cd > > > ---> 065a682c58fd > > > Step 4/4 : RUN make && cp build/helloworld-shared > > /usr/local/bin/helloworld > > > ---> Running in 11e755a7180b > > > Makefile:44: *** "Please define RTE_SDK environment variable". Stop. 
> > > The command '/bin/sh -c make && cp build/helloworld-shared > > > /usr/local/bin/helloworld' returned a non-zero code: 2 > > > > > > Missing define of RTE_SDK and RTE_TARGET? > > > > Hi Ruifeng, > > > > I think you run you run the command in dpdk/extras. However, this > > 'Dockerfile.dpdkSampleApp' is expected to be run in dpdk's root dir so > > that it is mounted as '/opt/dpdk' in the second step above. I have > > tested this Dockerfile on Ubuntu 18.04 and compiled without any error. > > RTE_SDK is set correctly, but dpdk's directory is not mounted in the > > container. > > Hi Yasufumi, I ran the command in dpdk root dir which should be correct. The issue was due to shared library image not been correctly built. See below. Thanks. /Ruifeng > > Abdul, > > > > >> +docker build -t ${DOCKERAPP_TAG} -f Dockerfile.dpdkSampleApp . > > > > I think this line should be corrected as following, and make it clear > > it should be run in dpdk's root. > > > >docker build -t ${DOCKERAPP_TAG} -f extras/Dockerfile.dpdkSampleApp . > > > > Even if the container image is built successfully, there is another > > problem in running app because it isn't run in privileged mode. > > > > root@0d2a309dfd2c:/opt/dpdk/examples/helloworld# helloworld > > EAL: Detected 16 lcore(s) > > EAL: Detected 1 NUMA nodes > > ... > > EAL: Failed to get current mempolicy: Operation not permitted. > > Assuming MPOL_DEFAULT. > > set_mempolicy: Operation not permitted > > set_mempolicy: Operation not permit
[dpdk-dev] [PATCH] net/i40e: add PF MDD event handler
From: Zhu Tao This patch adds a handler for malicious driver detection event. We just gave a warning log and a statistical count on the PF. Signed-off-by: Zhu Tao --- doc/guides/nics/i40e.rst | 1 + doc/guides/rel_notes/release_20_02.rst | 4 ++ drivers/net/i40e/i40e_ethdev.c | 87 +- drivers/net/i40e/i40e_ethdev.h | 1 + 4 files changed, 91 insertions(+), 2 deletions(-) diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst index 38acf5906..539ea5c57 100644 --- a/doc/guides/nics/i40e.rst +++ b/doc/guides/nics/i40e.rst @@ -43,6 +43,7 @@ Features of the i40e PMD are: - Dynamic Device Personalization (DDP) - Queue region configuration - Virtual Function Port Representors +- Malicious Device Drive event catch and notify Prerequisites - diff --git a/doc/guides/rel_notes/release_20_02.rst b/doc/guides/rel_notes/release_20_02.rst index 0eaa45a76..5f39006a4 100644 --- a/doc/guides/rel_notes/release_20_02.rst +++ b/doc/guides/rel_notes/release_20_02.rst @@ -56,6 +56,10 @@ New Features Also, make sure to start the actual text at the margin. = +* **Updated the Intel i40e driver.** + + Added PF support Malicious Device Drive event catch and notify. + Removed Items - diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 5999c964b..87e638196 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -6760,6 +6760,85 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) rte_free(info.msg_buf); } +static void +i40e_handle_mdd_event(struct rte_eth_dev *dev) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool mdd_detected = false; + struct i40e_pf_vf *vf; + uint32_t reg; + int i; + + /* find what triggered the MDD event */ + reg = I40E_READ_REG(hw, I40E_GL_MDET_TX); + if (reg & I40E_GL_MDET_TX_VALID_MASK) { + uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> + I40E_GL_MDET_TX_PF_NUM_SHIFT; + uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + I40E_GL_MDET_TX_VF_NUM_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> + I40E_GL_MDET_TX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX " + "queue %d PF number 0x%02x VF number 0x%02x device %s\n", + event, queue, pf_num, vf_num, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_TX, 0x); + mdd_detected = true; + } + reg = I40E_READ_REG(hw, I40E_GL_MDET_RX); + if (reg & I40E_GL_MDET_RX_VALID_MASK) { + uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> + I40E_GL_MDET_RX_FUNCTION_SHIFT; + uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> + I40E_GL_MDET_RX_EVENT_SHIFT; + uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX " + "queue %d of function 0x%02x device %s\n", + event, queue, func, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_RX, 0x); + mdd_detected = true; + } + + if (mdd_detected) { + reg = I40E_READ_REG(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_TX, 0x); + PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n"); + } + reg = I40E_READ_REG(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_RX, 0x); + PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n"); + } + } + + /* see 
if one of the VFs needs its hand slapped */ + for (i = 0; i < pf->vf_num && mdd_detected; i++) { + vf = &pf->vfs[i]; + reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i), 0x); + vf->num_mdd_events++; +
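The quoted hunk only shows how the event is decoded and the registers cleared; the handler itself is presumably called from the PF interrupt service routine when the malicious-driver-detect cause bit fires. A hedged sketch of that wiring (the ICR0 handling is not visible in the hunk above, so treat this as an assumption about the elided part of the patch):

	/* Inside the existing i40e interrupt handler, after reading ICR0
	 * (assumed hook point; not shown in the quoted hunk). */
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		PMD_DRV_LOG(WARNING, "ICR0: malicious programming detected");
		i40e_handle_mdd_event(dev);
	}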
Re: [dpdk-dev] [dpdk-stable] [PATCH] net/bonding: do not inherit slave device configuration
On 12/8/19 6:44 PM, Chas Williams wrote: > On 2019-11-19 07:40, Andrew Rybchenko wrote: >> On 11/19/19 3:18 PM, Ferruh Yigit wrote: >>> On 11/19/2019 9:03 AM, Andrew Rybchenko wrote: Bonding device should control bonded devices configuration. Also avoid usage of slave's data->dev_conf. Fixes: 2efb58cbab6e ("bond: new link bonding library") Cc: sta...@dpdk.org Signed-off-by: Andrew Rybchenko --- drivers/net/bonding/rte_eth_bond_pmd.c | 24 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > b/drivers/net/bonding/rte_eth_bond_pmd.c index 707a0f3cdd..4f0e83205d 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1679,6 +1679,7 @@ int slave_configure(struct rte_eth_dev *bonded_eth_dev, struct rte_eth_dev *slave_eth_dev) { + struct rte_eth_conf dev_conf; struct bond_rx_queue *bd_rx_q; struct bond_tx_queue *bd_tx_q; uint16_t nb_rx_queues; @@ -1693,34 +1694,34 @@ slave_configure(struct rte_eth_dev > *bonded_eth_dev, /* Stop slave */ rte_eth_dev_stop(slave_eth_dev->data->port_id); + memset(&dev_conf, 0, sizeof(dev_conf)); + /* Enable interrupts on slave device if supported */ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) - slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; + dev_conf.intr_conf.lsc = 1; >>> I assume the original intention is making incremental changes to the > existing >>> slave configuration, if so we should copy the > 'slave_eth_dev->data->dev_conf' to >>> 'dev_conf' before start updating it. >> >> The problem is that I don't understand how incremental changes >> happen. It simply looks wrong or I don't understand something. >> It looks like it is the only place in bonding where slave configuration >> is done. >> > > I understand your confusion. Yes, it certainly looks like > slave_configure() is doing things wrong by directly modifying the slave's > data->dev_conf. If rte_eth_dev_configure() fails, the changes made do > get rolled back and become visible anyway despite the device having > failed to meet that configuration. rte_eth_dev_configure() handles the > rollback, but can't do anything in this case because it doesn't know > the device was directly modified. > > You should make a copy of the dev_conf instead of starting from scratch. > There are other capabilities in there that bonding doesn't care about > but the application might. May application configure slave device directly (e.g. before adding in bond) and bonding should respect it? Are there usecases behind? Of course, if an application configures both slaves directly and via bonding device, it could understand the configuration, but it looks very error-prone and over-complicated. Wouldn't it be better if bonding device configuration is passed to slaves? May be the reason behind is that net/bonding does not forward configuration to slaves except RSS configuration right now. Is the behaviour documented anywhere? Of course, any changes in the area would be behaviour change which should be documented in release notes at least or even go through deprecation process.
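To make the review suggestion concrete, the copy-then-modify variant of slave_configure() discussed above would look roughly like the sketch below, preserving whatever the application already set on the slave while still letting bonding apply its own adjustments (illustrative only; names taken from the quoted function):

	struct rte_eth_conf dev_conf;

	/* Start from the slave's current configuration rather than zeroing it,
	 * so unrelated capabilities the application configured are preserved. */
	memcpy(&dev_conf, &slave_eth_dev->data->dev_conf, sizeof(dev_conf));

	/* Apply bonding-specific adjustments on the local copy only. */
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		dev_conf.intr_conf.lsc = 1;

	/* ... RSS and offload adjustments on dev_conf ... */

	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
				       nb_rx_queues, nb_tx_queues, &dev_conf);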
Re: [dpdk-dev] [PATCH 1/3] ethdev: add RSS hash level
On 12/7/19 10:56 PM, Ajit Khaparde wrote: > On Sat, Dec 7, 2019 at 1:14 AM Andrew Rybchenko > wrote: > >> On 12/7/19 3:59 AM, Ajit Khaparde wrote: >>> This patch adds ability to configure RSS hash level in hardware. >>> This feature will allow an application to select RSS hash calculation >>> on outer or inner headers for tunneled packets. >>> >>> Signed-off-by: Ajit Khaparde >>> --- >>> lib/librte_ethdev/rte_ethdev.h | 27 +++ >>> 1 file changed, 27 insertions(+) >>> >>> diff --git a/lib/librte_ethdev/rte_ethdev.h >> b/lib/librte_ethdev/rte_ethdev.h >>> index 18a9defc2..5189bdbab 100644 >>> --- a/lib/librte_ethdev/rte_ethdev.h >>> +++ b/lib/librte_ethdev/rte_ethdev.h >>> @@ -444,11 +444,35 @@ struct rte_vlan_filter_conf { >>>* The *rss_hf* field of the *rss_conf* structure indicates the >> different >>>* types of IPv4/IPv6 packets to which the RSS hashing must be applied. >>>* Supplying an *rss_hf* equal to zero disables the RSS feature. >>> + * >>> + * The *rss_level* field of the *rss_conf* structure indicates the >>> + * Packet encapsulation level RSS hash @p types apply to. >>> + * >>> + * - @p 0 requests the default behavior. Depending on the packet >>> + * type, it can mean outermost, innermost, anything in between or >>> + * even no RSS. >>> + * >>> + * It basically stands for the innermost encapsulation level RSS >>> + * can be performed on according to PMD and device capabilities. >>> + * >>> + * - @p 1 requests RSS to be performed on the outermost packet >>> + * encapsulation level. >>> + * >>> + * - @p 2 and subsequent values request RSS to be performed on the >>> + * specified inner packet encapsulation level, from outermost to >>> + * innermost (lower to higher values). >>> + * >>> + * Support for values other than @p 0 is dependent on the underlying >>> + * hardware in use. >>> + * >>> + * Requesting a specific RSS level on unrecognized traffic results >>> + * in undefined behavior. >>>*/ >>> struct rte_eth_rss_conf { >>> uint8_t *rss_key;/**< If not NULL, 40-byte hash key. */ >>> uint8_t rss_key_len; /**< hash key length in bytes. */ >>> uint64_t rss_hf; /**< Hash functions to apply - see below. */ >>> + uint32_t rss_level; /**< RSS hash level */ >>> }; >> >> I'm not sure that offload flag is required in this case. >> I think maximum supported rss_level in dev_info will provide > > more information and per-queue level does not make sense I think information about maximum RSS hash level could be useful. At least it provides clear information about limitations instead of attempt to configure with RSS level equal to 3 and getting error without clear understanding why if the level is not supported. >> in this case. Even if per-queue group control is required, >> it should be doable via rte_flow API RSS action. >> > This is dev config and not flow specific configuration. Ofcourse while > passing > the rss_config, not all the queues may be specified, but that is not a new > behavior and it is upto the application anyway. Yes, of course. I was just trying to explain why Rx offload is not required and it should be just dev_info field with maximum RSS level supported. > Are we transitioning the device level configuration to rte_flow/flow > based scheme? No-no, see above. >> Anyway, it looks like it is ABI breakage with all consequences. >> In 64-bit case it is possible to put it before rss_hf to avoid >> ABI breakage, but it will break ABI on 32-bit anyway. >> > Right. > I sent the proposal for review early to get it cleaned up and ready > when the window opens. OK, good. 
>> >>> /* >>> @@ -599,6 +623,8 @@ rte_eth_rss_hf_refine(uint64_t rss_hf) >>> ETH_RSS_GENEVE | \ >>> ETH_RSS_NVGRE) >>> >>> +#define ETH_RSS_LEVEL_DEFAULT0 >>> + >>> /* >>>* Definitions used for redirection table entry size. >>>* Some RSS RETA sizes may not be supported by some drivers, check the >>> @@ -1103,6 +1129,7 @@ struct rte_eth_conf { >>> #define DEV_RX_OFFLOAD_SCTP_CKSUM 0x0002 >>> #define DEV_RX_OFFLOAD_OUTER_UDP_CKSUM 0x0004 >>> #define DEV_RX_OFFLOAD_RSS_HASH 0x0008 >>> +#define DEV_RX_OFFLOAD_RSS_LEVEL 0x0010 >>> >>> #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \ >>>DEV_RX_OFFLOAD_UDP_CKSUM | \ >>> >> >>
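As a usage illustration of the proposed semantics (assuming this patch is applied; the dev_info field for the maximum supported level is only being discussed and does not exist yet), an application requesting RSS on the first inner encapsulation level might configure a port like this:

static int
configure_inner_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP;
	/* 0 = default, 1 = outermost, 2 = first inner level, per the text above. */
	port_conf.rx_adv_conf.rss_conf.rss_level = 2;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}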
Re: [dpdk-dev] [PATCH] ethdev: allow multiple security sessions to use one rte flow
On Sun, Dec 8, 2019 at 4:19 PM Anoob Joseph wrote: > > The rte_security API which enables inline protocol/crypto feature > mandates that for every security session an rte_flow is created. This > would internally translate to a rule in the hardware which would do > packet classification. > > In rte_securty, one SA would be one security session. And if an rte_flow > need to be created for every session, the number of SAs supported by an > inline implementation would be limited by the number of rte_flows the > PMD would be able to support. > > If the fields SPI & IP addresses are allowed to be a range, then this > limitation can be overcome. Multiple flows will be able to use one rule > for SECURITY processing. In this case, the security session provided as > conf would be NULL. > > Application should do an rte_flow_validate() to make sure the flow is > supported on the PMD. > > Signed-off-by: Anoob Joseph Reviewed-by: Jerin Jacob > --- > lib/librte_ethdev/rte_flow.h | 6 ++ > 1 file changed, 6 insertions(+) > > diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h > index 452d359..21fa7ed 100644 > --- a/lib/librte_ethdev/rte_flow.h > +++ b/lib/librte_ethdev/rte_flow.h > @@ -2239,6 +2239,12 @@ struct rte_flow_action_meter { > * direction. > * > * Multiple flows can be configured to use the same security session. > + * > + * The NULL value is allowed for security session. If security session is > NULL, > + * then SPI field in ESP flow item and IP addresses in flow items 'IPv4' and > + * 'IPv6' will be allowed to be a range. The rule thus created can enable > + * SECURITY processing on multiple flows. > + * > */ > struct rte_flow_action_security { > void *security_session; /**< Pointer to security session structure. */ > -- > 2.7.4 >
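As an illustration of the documented behaviour, an application covering a whole range of SAs with a single rule would pass a NULL security session and express the SPI as a range via spec/last, validating the flow first as the commit message requires. A rough sketch (attributes and IP items abbreviated; port_id is the example port and exact support is PMD-dependent):

	struct rte_flow_action_security sec = { .security_session = NULL };
	struct rte_flow_item_esp esp_spec = { .hdr = { .spi = RTE_BE32(1) } };
	struct rte_flow_item_esp esp_last = { .hdr = { .spi = RTE_BE32(1023) } };
	struct rte_flow_item_esp esp_mask = { .hdr = { .spi = RTE_BE32(0xffffffff) } };

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* addresses left open as a range */
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .last = &esp_last, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = &sec },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error flow_err;
	struct rte_flow *flow = NULL;

	/* Validate first; not every PMD supports a NULL session with ranged fields. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);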
Re: [dpdk-dev] [PATCH 01/15] common/octeontx2: add CPT LF mbox for inline inbound
On Sun, Dec 8, 2019 at 5:25 PM Anoob Joseph wrote: > > Adding the new mbox introduced to configure CPT LF to be used for inline > inbound. > > Signed-off-by: Anoob Joseph > Signed-off-by: Tejasree Kondoj Acked-by: Jerin Jacob > --- > drivers/common/octeontx2/otx2_mbox.h | 7 +++ > 1 file changed, 7 insertions(+) > > diff --git a/drivers/common/octeontx2/otx2_mbox.h > b/drivers/common/octeontx2/otx2_mbox.h > index e0e4e2f..70452d1 100644 > --- a/drivers/common/octeontx2/otx2_mbox.h > +++ b/drivers/common/octeontx2/otx2_mbox.h > @@ -193,6 +193,8 @@ M(CPT_SET_CRYPTO_GRP, 0xA03, cpt_set_crypto_grp, > \ >msg_rsp) \ > M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \ >cpt_inline_ipsec_cfg_msg, msg_rsp) \ > +M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg, \ > + cpt_rx_inline_lf_cfg_msg, msg_rsp) \ > /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ > M(NPC_MCAM_ALLOC_ENTRY,0x6000, npc_mcam_alloc_entry, > \ > npc_mcam_alloc_entry_req, \ > @@ -1202,6 +1204,11 @@ struct cpt_inline_ipsec_cfg_msg { > uint16_t __otx2_io nix_pf_func; /* Outbound path NIX_PF_FUNC */ > }; > > +struct cpt_rx_inline_lf_cfg_msg { > + struct mbox_msghdr hdr; > + uint16_t __otx2_io sso_pf_func; > +}; > + > /* NPC mbox message structs */ > > #define NPC_MCAM_ENTRY_INVALID 0x > -- > 2.7.4 >
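For context on how this new message would be used: entries in the M() list get mailbox alloc helpers generated for them, so the driver side would issue the request with the usual otx2 mailbox sequence, roughly as below (the actual call site appears later in this series; the helper name follows the generated-macro convention and is an assumption here):

	struct cpt_rx_inline_lf_cfg_msg *req;

	req = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
	req->sso_pf_func = otx2_sso_pf_func_get();

	return otx2_mbox_process(mbox);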
Re: [dpdk-dev] [PATCH 02/15] common/octeontx2: add routine to check if rte_eth_dev belongs to otx2
On Sun, Dec 8, 2019 at 5:25 PM Anoob Joseph wrote: > > From: Vamsi Attunuru > > This routine returns true if given rte_eth_dev belongs to octeontx2 > > Signed-off-by: Anoob Joseph > Signed-off-by: Tejasree Kondoj > Signed-off-by: Vamsi Attunuru > --- > drivers/common/octeontx2/otx2_common.c | 20 > > drivers/common/octeontx2/otx2_common.h | 2 ++ > .../octeontx2/rte_common_octeontx2_version.map | 1 + > 3 files changed, 23 insertions(+) > > diff --git a/drivers/common/octeontx2/otx2_common.c > b/drivers/common/octeontx2/otx2_common.c > index 7e45366..116db0f 100644 > --- a/drivers/common/octeontx2/otx2_common.c > +++ b/drivers/common/octeontx2/otx2_common.c > @@ -3,6 +3,7 @@ > */ > > #include > +#include > #include > #include > > @@ -23,6 +24,25 @@ otx2_npa_set_defaults(struct otx2_idev_cfg *idev) > > /** > * @internal > + * Check if rte_eth_dev is otx2_eth_dev > + */ > +uint8_t > +otx2_is_ethdev(struct rte_eth_dev *eth_dev) > +{ > + struct rte_pci_device *pci_dev; > + > + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); > + > + if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF || > + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF || > + pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF) Is PCI_DEVID_OCTEONTX2_RVU_SDP_VF and PCI_DEVID_OCTEONTX2_RVU_SDP_PF also comes here? If yes. update the patch. > + return 1; > + > + return 0; > +} > + > +/** > + * @internal > * Get intra device config structure. > */ > struct otx2_idev_cfg * > diff --git a/drivers/common/octeontx2/otx2_common.h > b/drivers/common/octeontx2/otx2_common.h > index f62c45d..d32e59a 100644 > --- a/drivers/common/octeontx2/otx2_common.h > +++ b/drivers/common/octeontx2/otx2_common.h > @@ -8,6 +8,7 @@ > #include > #include > #include > +#include > #include > #include > #include > @@ -70,6 +71,7 @@ struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void); > void otx2_sso_pf_func_set(uint16_t sso_pf_func); > uint16_t otx2_sso_pf_func_get(void); > uint16_t otx2_npa_pf_func_get(void); > +uint8_t otx2_is_ethdev(struct rte_eth_dev *eth_dev); > struct otx2_npa_lf *otx2_npa_lf_obj_get(void); > void otx2_npa_set_defaults(struct otx2_idev_cfg *idev); > int otx2_npa_lf_active(void *dev); > diff --git a/drivers/common/octeontx2/rte_common_octeontx2_version.map > b/drivers/common/octeontx2/rte_common_octeontx2_version.map > index adad21a..dac2283 100644 > --- a/drivers/common/octeontx2/rte_common_octeontx2_version.map > +++ b/drivers/common/octeontx2/rte_common_octeontx2_version.map > @@ -6,6 +6,7 @@ DPDK_20.0 { > otx2_dev_priv_init; > otx2_disable_irqs; > otx2_intra_dev_get_cfg; > + otx2_is_ethdev; > otx2_logtype_base; > otx2_logtype_dpi; > otx2_logtype_mbox; > -- > 2.7.4 >
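If the SDP PF/VF device IDs are indeed expected to match here, the helper would simply grow two more cases; a sketch of that variant (the SDP macro names are taken from the review question and assumed to be defined alongside the existing ones):

uint8_t
otx2_is_ethdev(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	switch (pci_dev->id.device_id) {
	case PCI_DEVID_OCTEONTX2_RVU_PF:
	case PCI_DEVID_OCTEONTX2_RVU_VF:
	case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
	case PCI_DEVID_OCTEONTX2_RVU_SDP_PF:	/* only if SDP ports should match */
	case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
		return 1;
	default:
		return 0;
	}
}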
Re: [dpdk-dev] discussion: creating a new class for vdpa drivers
On 12/8/19 10:06 AM, Matan Azrad wrote: > From: Andrew Rybchenko >> On 12/6/19 8:32 AM, Liang, Cunming wrote: >>> >>> -Original Message- From: Bie, Tiwei Sent: Friday, December 6, 2019 12:28 PM To: Matan Azrad Cc: Wang, Xiao W ; Thomas Monjalon ; maxime.coque...@redhat.com; Wang, >> Zhihong ; Yigit, Ferruh ; Shahaf Shuler ; Ori Kam >> ; dev@dpdk.org; Slava Ovsiienko ; Asaf >> Penso ; Olga Shern ; Liang, >> Cunming Subject: Re: discussion: creating a new class for vdpa driversxiao.w.w...@intel.com On Thu, Dec 05, 2019 at 01:26:36PM +, Matan Azrad wrote: > Hi all > > As described in RFC “[RFC] net: new vdpa PMD for Mellanox devices”, > a new vdpa drivers is going to be added for Mellanox devices – > mlx5_vdpa > > The only vdpa driver now is the IFC driver that is located in net > directory. > > The IFC driver and the new mlx5_vdpa driver provide the vdpa ops and > not the eth_dev ops. > > All the others drivers in net provide the eth-dev ops. > > I suggest to create a new class for vdpa drivers, to move IFC to > this class and to add the mlx5_vdpa to this class too. > > Later, all the new drivers that implements the vdpa ops will be > added to the vdpa class. +1. Sounds like a good idea to me. >>> +1 >> >> vDPA drivers are vendor-specific and expected to talk to vendor NIC. I.e. >> there are significant chances to share code with network drivers (e.g. base >> driver). Should base driver be moved to drivers/common in this case or is it >> still allows to have vdpa driver in drivers/net together with ethdev driver? > > Yes, I think this should be the method, shared code should be moved to the > drivers/common directory. > I think there is a precedence with shared code in common which shares a > vendor specific code between crypto and net. I see motivation behind driver/vdpa. However, vdpa and net drivers tightly related and I would prefer to avoid extra complexity here. Right now simplicity over-weights for me. No strong opinion on the topic, but it definitely requires better and more clear motivation why a new class should be introduced and existing drivers restructured. > Actually, this is my plan to share mlx5 vdpa code with mlx5 net code by the > drivers/common dir (see RFC). I see.
Re: [dpdk-dev] [PATCH 03/15] crypto/octeontx2: configure for inline IPsec
On Sun, Dec 8, 2019 at 5:26 PM Anoob Joseph wrote: > > From: Tejasree Kondoj > > For enabling outbound inline IPsec, a CPT queue needs to be tied > to a NIX PF_FUNC. Distribute CPT queues fairly among all availble > otx2 eth ports. > > For inbound, one CPT LF will be assigned and initialized by kernel. > > Signed-off-by: Ankur Dwivedi > Signed-off-by: Anoob Joseph > Signed-off-by: Archana Muniganti > Signed-off-by: Tejasree Kondoj > Signed-off-by: Vamsi Attunuru > > +static int > +otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp > *qp) > +{ > + static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1); > + uint16_t port_id, nb_ethport = rte_eth_dev_count_avail(); > + int i, ret; > + > + for (i = 0; i < nb_ethport; i++) { > + port_id = rte_atomic16_add_return(&port_offset, 1) % > nb_ethport; > + if (otx2_is_ethdev(&rte_eth_devices[port_id])) > + break; > + } > + > + if (i >= nb_ethport) > + return 0; > + > + ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id); > + if (ret) > + return ret; > + > + return 0; Last five lines can be replaced with "return otx2_cpt_qp_ethdev_bind(dev, qp, port_id)" Across the patch series, the above pattern is common, Please fix in all relevant instances.
Re: [dpdk-dev] [PATCH 05/15] crypto/octeontx2: add security in eth dev configure
On Sun, Dec 8, 2019 at 5:26 PM Anoob Joseph wrote: > > From: Tejasree Kondoj > > Adding security in eth device configure. > > Signed-off-by: Ankur Dwivedi > Signed-off-by: Anoob Joseph > Signed-off-by: Archana Muniganti > Signed-off-by: Tejasree Kondoj > Signed-off-by: Vamsi Attunuru > --- > doc/guides/nics/octeontx2.rst | 20 + > drivers/common/octeontx2/otx2_common.h | 4 + > drivers/crypto/octeontx2/Makefile | 2 +- > drivers/crypto/octeontx2/meson.build| 1 + > drivers/crypto/octeontx2/otx2_cryptodev.c | 2 + > drivers/crypto/octeontx2/otx2_ipsec_fp.h| 55 + > drivers/crypto/octeontx2/otx2_security.c| 122 > > drivers/crypto/octeontx2/otx2_security.h| 4 + > drivers/net/octeontx2/otx2_ethdev.c | 22 - > drivers/net/octeontx2/otx2_ethdev.h | 2 + > drivers/net/octeontx2/otx2_ethdev_devargs.c | 19 + Add "Inline crypto" feature in doc/guides/nics/features/octeontx2*.ini
Re: [dpdk-dev] [EXT] Re: [PATCH 03/15] crypto/octeontx2: configure for inline IPsec
Hi Jerin, Please see inline. Thanks, Anoob > -Original Message- > From: Jerin Jacob > Sent: Monday, December 9, 2019 1:17 PM > To: Anoob Joseph > Cc: Akhil Goyal ; Declan Doherty > ; Thomas Monjalon ; > Tejasree Kondoj ; Jerin Jacob Kollanukkaran > ; Narayana Prasad Raju Athreya > ; Kiran Kumar Kokkilagadda > ; Nithin Kumar Dabilpuram > ; Pavan Nikhilesh Bhagavatula > ; Ankur Dwivedi ; > Archana Muniganti ; Vamsi Krishna Attunuru > ; Lukas Bartosik ; dpdk-dev > > Subject: [EXT] Re: [dpdk-dev] [PATCH 03/15] crypto/octeontx2: configure for > inline IPsec > > External Email > > -- > On Sun, Dec 8, 2019 at 5:26 PM Anoob Joseph wrote: > > > > From: Tejasree Kondoj > > > > For enabling outbound inline IPsec, a CPT queue needs to be tied to a > > NIX PF_FUNC. Distribute CPT queues fairly among all availble > > otx2 eth ports. > > > > For inbound, one CPT LF will be assigned and initialized by kernel. > > > > Signed-off-by: Ankur Dwivedi > > Signed-off-by: Anoob Joseph > > Signed-off-by: Archana Muniganti > > Signed-off-by: Tejasree Kondoj > > Signed-off-by: Vamsi Attunuru > > > > > +static int > > +otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct > > +otx2_cpt_qp *qp) { > > + static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1); > > + uint16_t port_id, nb_ethport = rte_eth_dev_count_avail(); > > + int i, ret; > > + > > + for (i = 0; i < nb_ethport; i++) { > > + port_id = rte_atomic16_add_return(&port_offset, 1) % > > nb_ethport; > > + if (otx2_is_ethdev(&rte_eth_devices[port_id])) > > + break; > > + } > > + > > + if (i >= nb_ethport) > > + return 0; > > + > > + ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id); > > + if (ret) > > + return ret; > > + > > + return 0; > > Last five lines can be replaced with "return otx2_cpt_qp_ethdev_bind(dev, qp, > port_id)" [Anoob] In one of the following patches, one more call would be introduced after the call to otx2_cpt_qp_ethdev_bind(). So the above lines will have to be introduced anyway. For the last such addition, I'll make it return directly. Is that fine? > > Across the patch series, the above pattern is common, Please fix in all > relevant > instances.
[dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server mode
This patch fixes the situation where datapath does not work properly when vhost reconnects to virtio in server mode with packed ring. Currently, virtio and vhost share memory of vring. For split ring, vhost can read the status of discriptors directly from the available ring and the used ring during reconnection. Therefore, the datapath can continue. But for packed ring, when reconnecting to virtio, vhost cannot get the status of discriptors only through the descriptor ring. By resetting Tx and Rx queues, the datapath can restart from the beginning. Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines") Cc: sta...@dpdk.org Signed-off-by: Xuan Ding --- drivers/net/virtio/virtio_ethdev.c | 112 +++- drivers/net/virtio/virtio_ethdev.h | 3 + drivers/net/virtio/virtio_user_ethdev.c | 8 ++ 3 files changed, 121 insertions(+), 2 deletions(-) diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 044eb10a7..c0cb0f23c 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq) virtqueue_disable_intr(vq); } +static int +virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_rx *rxvq; + struct vq_desc_extra *dxp; + unsigned int vq_size; + uint16_t desc_idx, i; + + vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx); + + vq->vq_packed.used_wrap_counter = 1; + vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL; + vq->vq_packed.event_flags_shadow = 0; + vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE; + + rxvq = &vq->rxq; + memset(rxvq->mz->addr, 0, rxvq->mz->len); + + for (desc_idx = 0; desc_idx < vq_size; desc_idx++) { + dxp = &vq->vq_descx[desc_idx]; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } + + virtio_init_vring(vq); + + for (i = 0; i < hw->max_queue_pairs; i++) + if (rxvq->mpool != NULL) + virtio_dev_rx_queue_setup_finish(dev, i); + + return 0; +} + +static int +virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_tx *txvq; + struct vq_desc_extra *dxp; + unsigned int vq_size; + uint16_t desc_idx; + + vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx); + + vq->vq_packed.used_wrap_counter = 1; + vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL; + vq->vq_packed.event_flags_shadow = 0; + + txvq = &vq->txq; + memset(txvq->mz->addr, 0, txvq->mz->len); + memset(txvq->virtio_net_hdr_mz->addr, 0, + txvq->virtio_net_hdr_mz->len); + + for (desc_idx = 0; desc_idx < vq_size; desc_idx++) { + dxp = &vq->vq_descx[desc_idx]; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } + + virtio_init_vring(vq); + + return 0; +} + +static int +virtio_user_reset_queues(struct rte_eth_dev *eth_dev) +{ + uint16_t i; + + /* Vring reset for each Tx queue and Rx queue. 
*/ + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + virtio_user_reset_rx_queues(eth_dev, i); + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) + virtio_user_reset_tx_queues(eth_dev, i); + + return 0; +} + static int virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) { @@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) goto err_vtpci_init; } + rte_spinlock_init(&hw->state_lock); + /* reset device and negotiate default features */ ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES); if (ret < 0) @@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) return -EBUSY; } - rte_spinlock_init(&hw->state_lock); - hw->use_simple_rx = 1; if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { @@ -2421,6 +2509,26 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } +int +virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct virtio_hw *hw) +{ + /* Add lock to avoid queue contention. */ + rte_spinlock_lock(&hw->state_lock); + hw->started
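The underlying reason the packed ring cannot simply be re-read after a reconnect, unlike the split ring with its separate avail/used rings, is that descriptor state is interpreted relative to a wrap counter that is private driver state. DPDK's check is essentially the simplified form below, so a reconnecting backend has no way to recover which descriptors are still in flight, and resetting the queues as in this patch is the practical recovery:

/* Simplified packed-ring "descriptor is used" test: both flag bits must
 * match the reader's current wrap counter, which lives only in the driver
 * and is therefore lost to the backend across a reconnect. */
static inline int
desc_is_used(const struct vring_packed_desc *desc, int used_wrap_counter)
{
	uint16_t flags = desc->flags;
	int avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
	int used = !!(flags & VRING_PACKED_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}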
Re: [dpdk-dev] [EXT] Re: [PATCH 03/15] crypto/octeontx2: configure for inline IPsec
On Mon, Dec 9, 2019 at 1:22 PM Anoob Joseph wrote: > > Hi Jerin, > > Please see inline. > > Thanks, > Anoob > > > -Original Message- > > From: Jerin Jacob > > Sent: Monday, December 9, 2019 1:17 PM > > To: Anoob Joseph > > Cc: Akhil Goyal ; Declan Doherty > > ; Thomas Monjalon ; > > Tejasree Kondoj ; Jerin Jacob Kollanukkaran > > ; Narayana Prasad Raju Athreya > > ; Kiran Kumar Kokkilagadda > > ; Nithin Kumar Dabilpuram > > ; Pavan Nikhilesh Bhagavatula > > ; Ankur Dwivedi ; > > Archana Muniganti ; Vamsi Krishna Attunuru > > ; Lukas Bartosik ; dpdk-dev > > > > Subject: [EXT] Re: [dpdk-dev] [PATCH 03/15] crypto/octeontx2: configure for > > inline IPsec > > > > External Email > > > > -- > > On Sun, Dec 8, 2019 at 5:26 PM Anoob Joseph wrote: > > > > > > From: Tejasree Kondoj > > > > > > For enabling outbound inline IPsec, a CPT queue needs to be tied to a > > > NIX PF_FUNC. Distribute CPT queues fairly among all availble > > > otx2 eth ports. > > > > > > For inbound, one CPT LF will be assigned and initialized by kernel. > > > > > > Signed-off-by: Ankur Dwivedi > > > Signed-off-by: Anoob Joseph > > > Signed-off-by: Archana Muniganti > > > Signed-off-by: Tejasree Kondoj > > > Signed-off-by: Vamsi Attunuru > > > > > > > > +static int > > > +otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct > > > +otx2_cpt_qp *qp) { > > > + static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1); > > > + uint16_t port_id, nb_ethport = rte_eth_dev_count_avail(); > > > + int i, ret; > > > + > > > + for (i = 0; i < nb_ethport; i++) { > > > + port_id = rte_atomic16_add_return(&port_offset, 1) % > > > nb_ethport; > > > + if (otx2_is_ethdev(&rte_eth_devices[port_id])) > > > + break; > > > + } > > > + > > > + if (i >= nb_ethport) > > > + return 0; > > > + > > > + ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id); > > > + if (ret) > > > + return ret; > > > + > > > + return 0; > > > > Last five lines can be replaced with "return otx2_cpt_qp_ethdev_bind(dev, > > qp, > > port_id)" > > [Anoob] In one of the following patches, one more call would be introduced > after the call to otx2_cpt_qp_ethdev_bind(). So the above lines will have to > be introduced anyway. For the last such addition, I'll make it return > directly. Is that fine? Yes, > > > > > Across the patch series, the above pattern is common, Please fix in all > > relevant > > instances.
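For completeness, the shape agreed on above, once the later patch adds its extra call, is the usual pattern of checking intermediate steps and returning the result of the final call directly; a generic sketch (the second helper name is a placeholder, not an actual function in the series):

	ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
	if (ret)
		return ret;

	/* hypothetical final step added later in the series,
	 * returned directly per the review comment */
	return otx2_cpt_qp_inline_ipsec_setup(dev, qp, port_id);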