Introduce new JUMP_TO_TABLE and JUMP_TO_TABLE_INDEX actions. They allow bypassing a hierarchy of groups and going directly to a specified flow table. That gives a user the flexibility to jump between different priorities in a group and eliminates the need to do a table lookup in the group hierarchy.
The JUMP_TO_TABLE action redirects a packet to a regular flow group with the pattern-based flow rules insertion type. The JUMP_TO_TABLE_INDEX action forwards a packet to the specified rule index inside the index-based flow table. The current index-based flow table doesn't do any matching on the packet and executes the actions immediately. Add a new index-based flow table with pattern matching. The JUMP_TO_TABLE_INDEX can redirect a packet to another matching criteria at the specified index in this case. Signed-off-by: Alexander Kozyrev <akozy...@nvidia.com> --- lib/ethdev/rte_flow.c | 58 ++++++++++++++++++++++ lib/ethdev/rte_flow.h | 95 ++++++++++++++++++++++++++++++++++++ lib/ethdev/rte_flow_driver.h | 14 ++++++ lib/ethdev/version.map | 4 ++ 4 files changed, 171 insertions(+) diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c index 4076ae4ee1..91f1293bc8 100644 --- a/lib/ethdev/rte_flow.c +++ b/lib/ethdev/rte_flow.c @@ -275,6 +275,8 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = { MK_FLOW_ACTION(PROG, sizeof(struct rte_flow_action_prog)), MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)), + MK_FLOW_ACTION(JUMP_TO_TABLE, sizeof(struct rte_flow_action_jump_to_table)), + MK_FLOW_ACTION(JUMP_TO_TABLE_INDEX, sizeof(struct rte_flow_action_jump_to_table_index)), }; int @@ -2109,6 +2111,43 @@ rte_flow_async_create_by_index(uint16_t port_id, user_data, error); } +struct rte_flow * +rte_flow_async_create_by_index_with_pattern(uint16_t port_id, + uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct rte_flow_template_table *template_table, + uint32_t rule_index, + const struct rte_flow_item pattern[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t actions_template_index, + void *user_data, + struct rte_flow_error *error) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + +#ifdef RTE_FLOW_DEBUG + if (!rte_eth_dev_is_valid_port(port_id)) { + rte_flow_error_set(error, ENODEV, 
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + rte_strerror(ENODEV)); + return NULL; + } + if (dev->flow_fp_ops == NULL || + dev->flow_fp_ops->async_create_by_index_with_pattern == NULL) { + rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + rte_strerror(ENOSYS)); + return NULL; + } +#endif + + return dev->flow_fp_ops->async_create_by_index_with_pattern(dev, queue_id, + op_attr, template_table, + rule_index, + pattern, pattern_template_index, + actions, actions_template_index, + user_data, error); +} + int rte_flow_async_destroy(uint16_t port_id, uint32_t queue_id, @@ -2733,6 +2772,24 @@ rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused, return NULL; } +static struct rte_flow * +rte_flow_dummy_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused, + uint32_t queue __rte_unused, + const struct rte_flow_op_attr *attr __rte_unused, + struct rte_flow_template_table *table __rte_unused, + uint32_t rule_index __rte_unused, + const struct rte_flow_item items[] __rte_unused, + uint8_t pattern_template_index __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + uint8_t action_template_index __rte_unused, + void *user_data __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + rte_strerror(ENOSYS)); + return NULL; +} + static int rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused, uint32_t queue_id __rte_unused, @@ -2898,6 +2955,7 @@ rte_flow_dummy_async_action_list_handle_query_update( struct rte_flow_fp_ops rte_flow_fp_default_ops = { .async_create = rte_flow_dummy_async_create, .async_create_by_index = rte_flow_dummy_async_create_by_index, + .async_create_by_index_with_pattern = rte_flow_dummy_async_create_by_index_with_pattern, .async_actions_update = rte_flow_dummy_async_actions_update, .async_destroy = rte_flow_dummy_async_destroy, .push = rte_flow_dummy_push, diff --git a/lib/ethdev/rte_flow.h 
b/lib/ethdev/rte_flow.h index f864578f80..6e3ffbf558 100644 --- a/lib/ethdev/rte_flow.h +++ b/lib/ethdev/rte_flow.h @@ -3262,6 +3262,24 @@ enum rte_flow_action_type { * @see struct rte_flow_action_nat64 */ RTE_FLOW_ACTION_TYPE_NAT64, + + /** + * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE, + * + * Redirects packets to a particular flow table. + * + * @see struct rte_flow_action_jump_to_table. + */ + RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE, + + /** + * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX, + * + * Redirects packets to a particular index in a flow table. + * + * @see struct rte_flow_action_jump_to_table_index. + */ + RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX, }; /** @@ -4266,6 +4284,26 @@ rte_flow_dynf_metadata_set(struct rte_mbuf *m, uint32_t v) *RTE_FLOW_DYNF_METADATA(m) = v; } +/** + * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE + * + * Redirects packets to a particular flow table. + */ +struct rte_flow_action_jump_to_table { + struct rte_flow_template_table *table; +}; + +/** + * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX + * + * Redirects packets to a particular index in a flow table. + */ +struct rte_flow_action_jump_to_table_index { + struct rte_flow_template_table *table; + uint32_t index; +}; + + /** * Definition of a single action. * @@ -5898,6 +5936,10 @@ enum rte_flow_table_insertion_type { * Index-based insertion. */ RTE_FLOW_TABLE_INSERTION_TYPE_INDEX, + /** + * Index-based insertion with pattern. + */ + RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN, }; /** @@ -6183,6 +6225,59 @@ rte_flow_async_create_by_index(uint16_t port_id, void *user_data, struct rte_flow_error *error); +/** + * @warning + * @b EXPERIMENTAL: this API may change without prior notice. + * + * Enqueue rule creation by index with pattern operation. + * + * @param port_id + * Port identifier of Ethernet device. + * @param queue_id + * Flow queue used to insert the rule. + * @param[in] op_attr + * Rule creation operation attributes. + * @param[in] template_table + * Template table to select templates from. 
+ * @param[in] rule_index + * Rule index in the table. + * Inserting a rule to already occupied index results in undefined behavior. + * @param[in] pattern + * List of pattern items to be used. + * The list order should match the order in the pattern template. + * The spec is the only relevant member of the item that is being used. + * @param[in] pattern_template_index + * Pattern template index in the table. + * @param[in] actions + * List of actions to be used. + * The list order should match the order in the actions template. + * @param[in] actions_template_index + * Actions template index in the table. + * @param[in] user_data + * The user data that will be returned on the completion events. + * @param[out] error + * Perform verbose error reporting if not NULL. + * PMDs initialize this structure in case of error only. + * + * @return + * Handle on success, NULL otherwise and rte_errno is set. + * The rule handle doesn't mean that the rule has been populated. + * Only completion result indicates that if there was success or failure. + */ +__rte_experimental +struct rte_flow * +rte_flow_async_create_by_index_with_pattern(uint16_t port_id, + uint32_t queue_id, + const struct rte_flow_op_attr *op_attr, + struct rte_flow_template_table *template_table, + uint32_t rule_index, + const struct rte_flow_item pattern[], + uint8_t pattern_template_index, + const struct rte_flow_action actions[], + uint8_t actions_template_index, + void *user_data, + struct rte_flow_error *error); + /** * @warning * @b EXPERIMENTAL: this API may change without prior notice. diff --git a/lib/ethdev/rte_flow_driver.h b/lib/ethdev/rte_flow_driver.h index 506d1262ab..e1fb4c6088 100644 --- a/lib/ethdev/rte_flow_driver.h +++ b/lib/ethdev/rte_flow_driver.h @@ -319,6 +319,19 @@ typedef struct rte_flow *(*rte_flow_async_create_by_index_t)(struct rte_eth_dev void *user_data, struct rte_flow_error *error); +/** @internal Enqueue rule creation by index with pattern operation. 
*/ +typedef struct rte_flow *(*rte_flow_async_create_by_index_with_pattern_t)(struct rte_eth_dev *dev, + uint32_t queue, + const struct rte_flow_op_attr *attr, + struct rte_flow_template_table *table, + uint32_t rule_index, + const struct rte_flow_item *items, + uint8_t pattern_template_index, + const struct rte_flow_action *actions, + uint8_t action_template_index, + void *user_data, + struct rte_flow_error *error); + /** @internal Enqueue rule update operation. */ typedef int (*rte_flow_async_actions_update_t)(struct rte_eth_dev *dev, uint32_t queue_id, @@ -435,6 +448,7 @@ typedef int (*rte_flow_async_action_list_handle_query_update_t)( struct __rte_cache_aligned rte_flow_fp_ops { rte_flow_async_create_t async_create; rte_flow_async_create_by_index_t async_create_by_index; + rte_flow_async_create_by_index_with_pattern_t async_create_by_index_with_pattern; rte_flow_async_actions_update_t async_actions_update; rte_flow_async_destroy_t async_destroy; rte_flow_push_t push; diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map index 79f6f5293b..1f18ccd08a 100644 --- a/lib/ethdev/version.map +++ b/lib/ethdev/version.map @@ -325,6 +325,10 @@ EXPERIMENTAL { rte_flow_template_table_resizable; rte_flow_template_table_resize; rte_flow_template_table_resize_complete; + + # added in 24.11 + rte_flow_async_create_by_index_with_pattern; + }; INTERNAL { -- 2.18.2