> -----Original Message-----
> From: Su, Simei <simei...@intel.com>
> Sent: Wednesday, March 18, 2020 1:42 PM
> To: Ye, Xiaolong <xiaolong...@intel.com>; Zhang, Qi Z <qi.z.zh...@intel.com>
> Cc: dev@dpdk.org; Cao, Yahui <yahui....@intel.com>; Wu, Jingjing
> <jingjing...@intel.com>; Su, Simei <simei...@intel.com>
> Subject: [PATCH 1/5] net/iavf: add support for FDIR basic rule
>
> This patch adds the FDIR create/destroy/validate functions to AVF.
> Common patterns and the queue/qgroup/passthru/drop actions are supported.
>
> Signed-off-by: Simei Su <simei...@intel.com>
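
For anyone trying the series: a rule exercising this path through the
generic rte_flow API could look like the sketch below (illustrative only,
not part of the patch; it assumes port 0 is an iavf port with at least
four Rx queues and that rte_flow.h is included):

	/* steer IPv4/UDP packets with destination port 53 to Rx queue 3 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = RTE_BE16(53) },
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow;

	/* validate maps to iavf_fdir_check(), create to iavf_fdir_add() */
	if (rte_flow_validate(0, &attr, pattern, actions, &flow_err) == 0)
		flow = rte_flow_create(0, &attr, pattern, actions, &flow_err);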
> ---
> drivers/net/iavf/Makefile     |   1 +
> drivers/net/iavf/iavf.h       |  16 +
> drivers/net/iavf/iavf_fdir.c  | 762 ++++++++++++++++++++++++++++++++++++++++++
> drivers/net/iavf/iavf_vchnl.c | 128 ++++++-
> drivers/net/iavf/meson.build  |   1 +
> 5 files changed, 907 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/iavf/iavf_fdir.c
>
> diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
> index 1bf0f26..193bc55 100644
> --- a/drivers/net/iavf/Makefile
> +++ b/drivers/net/iavf/Makefile
> @@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
> +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
> ifeq ($(CONFIG_RTE_ARCH_X86), y)
> SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
> endif
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index 48b9509..62a3eb8 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -99,6 +99,16 @@ struct iavf_vsi {
> struct iavf_flow_parser_node;
> TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
>
> +struct iavf_fdir_conf {
> + struct virtchnl_fdir_fltr input;
> + uint64_t input_set;
> + uint32_t flow_id;
> +};
> +
> +struct iavf_fdir_info {
> + struct iavf_fdir_conf conf;
> +};
> +
> /* TODO: is that correct to assume the max number to be 16 ?*/
> #define IAVF_MAX_MSIX_VECTORS 16
>
> @@ -138,6 +148,8 @@ struct iavf_info {
> struct iavf_flow_list flow_list;
> struct iavf_parser_list rss_parser_list;
> struct iavf_parser_list dist_parser_list;
> +
> + struct iavf_fdir_info fdir; /* flow director info */
> };
>
> #define IAVF_MAX_PKT_TYPE 1024
> @@ -260,4 +272,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
> int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
> 			struct rte_ether_addr *addr, bool add);
> int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
> +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
> +int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
> +int iavf_fdir_check(struct iavf_adapter *adapter,
> +		struct iavf_fdir_conf *filter);
> #endif /* _IAVF_ETHDEV_H_ */
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> new file mode 100644
> index 0000000..dd321ba
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -0,0 +1,762 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +#include <rte_tailq.h>
> +
> +#include "iavf.h"
> +#include "iavf_generic_flow.h"
> +#include "virtchnl.h"
> +
> +#define IAVF_FDIR_MAX_QREGION_SIZE 128
> +
> +#define IAVF_FDIR_IPV6_TC_OFFSET 20
> +#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
> +
> +#define IAVF_FDIR_INSET_ETH (\
> + IAVF_INSET_ETHERTYPE)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4 (\
> + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> + IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> + IAVF_INSET_IPV4_TTL)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
> + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
> + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
> + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
> + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6 (\
> + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> + IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
> + IAVF_INSET_IPV6_HOP_LIMIT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
> + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
> + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
> +
> +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
> + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
> + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
> +
> +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
> +	{iavf_pattern_ethertype,	IAVF_FDIR_INSET_ETH,		IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4,		IAVF_FDIR_INSET_ETH_IPV4,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_udp,	IAVF_FDIR_INSET_ETH_IPV4_UDP,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_tcp,	IAVF_FDIR_INSET_ETH_IPV4_TCP,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv4_sctp,	IAVF_FDIR_INSET_ETH_IPV4_SCTP,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6,		IAVF_FDIR_INSET_ETH_IPV6,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_udp,	IAVF_FDIR_INSET_ETH_IPV6_UDP,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_tcp,	IAVF_FDIR_INSET_ETH_IPV6_TCP,	IAVF_INSET_NONE},
> +	{iavf_pattern_eth_ipv6_sctp,	IAVF_FDIR_INSET_ETH_IPV6_SCTP,	IAVF_INSET_NONE},
> +};
> +
> +static struct iavf_flow_parser iavf_fdir_parser;
> +
> +static int
> +iavf_fdir_init(struct iavf_adapter *ad)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_flow_parser *parser;
> +
> +	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
> +		parser = &iavf_fdir_parser;
> +	else
> +		return -ENOTSUP;
> +
> +	return iavf_register_parser(parser, ad);
> +}
> +
> +static void
> +iavf_fdir_uninit(struct iavf_adapter *ad)
> +{
> +	struct iavf_flow_parser *parser;
> +
> +	parser = &iavf_fdir_parser;
> +
> +	iavf_unregister_parser(parser, ad);
> +}
> +
> +static int
> +iavf_fdir_create(struct iavf_adapter *ad,
> + struct rte_flow *flow,
> + void *meta,
> + struct rte_flow_error *error)
> +{
> + struct iavf_fdir_conf *filter = meta;
> + struct iavf_fdir_conf *rule;
> + int ret;
> +
> + rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
> + if (!rule) {
> + rte_flow_error_set(error, ENOMEM,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Failed to allocate memory");
> + return -rte_errno;
> + }
> +
> + ret = iavf_fdir_add(ad, filter);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Add filter rule failed.");
> + goto free_entry;
> + }
> +
> + rte_memcpy(rule, filter, sizeof(*rule));
> + flow->rule = rule;
> +
> + return 0;
> +
> +free_entry:
> + rte_free(rule);
> + return -rte_errno;
> +}
> +
> +static int
> +iavf_fdir_destroy(struct iavf_adapter *ad,
> + struct rte_flow *flow,
> + struct rte_flow_error *error)
> +{
> + struct iavf_fdir_conf *filter;
> + int ret;
> +
> + filter = (struct iavf_fdir_conf *)flow->rule;
> +
> + ret = iavf_fdir_del(ad, filter);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Del filter rule failed.");
> + return -rte_errno;
> + }
> +
> + flow->rule = NULL;
> + rte_free(filter);
> +
> + return 0;
> +}
> +
> +static int
> +iavf_fdir_validation(struct iavf_adapter *ad,
> + __rte_unused struct rte_flow *flow,
> + void *meta,
> + struct rte_flow_error *error)
> +{
> + struct iavf_fdir_conf *filter = meta;
> + int ret;
> +
> + ret = iavf_fdir_check(ad, filter);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Validate filter rule failed.");
> + return -rte_errno;
> + }
> +
> + return 0;
> +}
> +
> +static struct iavf_flow_engine iavf_fdir_engine = {
> + .init = iavf_fdir_init,
> + .uninit = iavf_fdir_uninit,
> + .create = iavf_fdir_create,
> + .destroy = iavf_fdir_destroy,
> + .validation = iavf_fdir_validation,
> + .type = IAVF_FLOW_ENGINE_FDIR,
> +};
> +
> +static int
> +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
> +			struct rte_flow_error *error,
> +			const struct rte_flow_action *act,
> +			struct virtchnl_filter_action *filter_action)
> +{
> + const struct rte_flow_action_rss *rss = act->conf;
> + uint32_t i;
> +
> + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION, act,
> + "Invalid action.");
> + return -rte_errno;
> + }
> +
> + if (rss->queue_num <= 1) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION, act,
> + "Queue region size can't be 0 or 1.");
> + return -rte_errno;
> + }
> +
> +	/* check if the queue indices for the queue region are contiguous */
> + for (i = 0; i < rss->queue_num - 1; i++) {
> + if (rss->queue[i + 1] != rss->queue[i] + 1) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION, act,
> + "Discontinuous queue region");
> + return -rte_errno;
> + }
> + }
> +
> +	if (rss->queue[rss->queue_num - 1] >=
> +	    ad->eth_dev->data->nb_rx_queues) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"Invalid queue region indexes.");
> +		return -rte_errno;
> +	}
> +
> +	if (!(rte_is_power_of_2(rss->queue_num) &&
> +	      (rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION, act,
> +				"The region size should be any of the following values:"
> +				" 1, 2, 4, 8, 16, 32, 64, 128 as long as the total number"
> +				" of queues does not exceed the VSI allocation.");
> +		return -rte_errno;
> +	}
> +
> + filter_action->q_index = rss->queue[0];
> + filter_action->q_region = rte_fls_u32(rss->queue_num) - 1;
> +
> + return 0;
> +}
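
As a worked example of the encoding above: an RSS action listing queues
8..15 (queue_num = 8, contiguous, power of two) passes all of these
checks and yields q_index = 8 and q_region = rte_fls_u32(8) - 1 = 3,
i.e. the region size is carried as the log2 of the queue count.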
> +
> +static int
> +iavf_fdir_parse_action(struct iavf_adapter *ad,
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error,
> + struct iavf_fdir_conf *filter)
> +{
> + const struct rte_flow_action_queue *act_q;
> + uint32_t dest_num = 0;
> + int ret;
> +
> + int number = 0;
> + struct virtchnl_filter_action *filter_action;
> +
> + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> + switch (actions->type) {
> + case RTE_FLOW_ACTION_TYPE_VOID:
> + break;
> +
> +		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_DROP:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_DROP;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
[Cao, Yahui]
It seems there is no upper-bound check on count/number, so an
out-of-bounds index access is possible.
This also applies to all the count/number statements below.
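
A minimal sketch of such a guard before each array write (assuming the
virtchnl header used by this series defines an upper bound for the
actions array; VIRTCHNL_MAX_NUM_ACTIONS below is a placeholder for
whatever macro it actually provides):

	/* hypothetical bound macro; use the limit from virtchnl.h */
	if (number >= VIRTCHNL_MAX_NUM_ACTIONS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, actions,
				"Too many actions for one flow rule.");
		return -rte_errno;
	}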
> + break;
> +
> +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> +			dest_num++;
> +
> +			act_q = actions->conf;
> +			filter_action = &filter->input.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_QUEUE;
> +			filter_action->q_index = act_q->index;
> +
> +			if (filter_action->q_index >=
> +			    ad->eth_dev->data->nb_rx_queues) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION,
> +					actions, "Invalid queue for FDIR.");
> +				return -rte_errno;
> +			}
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_RSS:
> +			dest_num++;
> +
> +			filter_action = &filter->input.rule_cfg.action_set.actions[number];
> +
> +			filter_action->type = VIRTCHNL_FDIR_ACT_Q_REGION;
> +
> +			ret = iavf_fdir_parse_action_qregion(ad,
> +					error, actions, filter_action);
> +			if (ret)
> +				return ret;
> +
> +			filter->input.rule_cfg.action_set.count = ++number;
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +					"Invalid action.");
> +			return -rte_errno;
> + }
> + }
> +
> + if (dest_num == 0 || dest_num >= 2) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION, actions,
> + "Unsupported action combination");
> + return -rte_errno;
> + }
> +
> + return 0;
> +}
> +
> +static int
> +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
> + const struct rte_flow_item pattern[],
> + struct rte_flow_error *error,
> + struct iavf_fdir_conf *filter)
> +{
> + const struct rte_flow_item *item = pattern;
> + enum rte_flow_item_type item_type;
> + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
> + const struct rte_flow_item_eth *eth_spec, *eth_mask;
> + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> + const struct rte_flow_item_udp *udp_spec, *udp_mask;
> + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> + uint64_t input_set = IAVF_INSET_NONE;
> +
> + enum rte_flow_item_type next_type;
> + uint16_t ether_type;
> +
> + int layer = 0;
> + struct virtchnl_proto_hdr *hdr;
> +
> + uint8_t ipv6_addr_mask[16] = {
> + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
> + };
> +
> +	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, item,
> +					"Not support range");
> +			return -rte_errno;
> +		}
> +
> + item_type = item->type;
> +
> + switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +			next_type = (item + 1)->type;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
> +
> +			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
> +			    (!eth_spec || !eth_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "NULL eth spec/mask.");
> +				return -rte_errno;
> +			}
> +
> +			if (eth_spec && eth_mask) {
> +				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
> +				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid MAC_addr mask.");
> +					return -rte_errno;
> +				}
> +			}
> +
> +			if (eth_spec && eth_mask && eth_mask->type) {
> +				if (eth_mask->type != RTE_BE16(0xffff)) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid type mask.");
> +					return -rte_errno;
> +				}
> +
> +				ether_type = rte_be_to_cpu_16(eth_spec->type);
> +				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
> +				    ether_type == RTE_ETHER_TYPE_IPV6) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Unsupported ether_type.");
> +					return -rte_errno;
> +				}
> +
> +				input_set |= IAVF_INSET_ETHERTYPE;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +						ETH, ETHERTYPE);
> +
> +				rte_memcpy(hdr->buffer, eth_spec,
> +					   sizeof(*eth_spec));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
[Cao, Yahui]
It seems there is no upper-bound check on count/layer, so an
out-of-bounds index access is possible.
This also applies to all the count/layer statements below.
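
An analogous guard would protect the protocol header stack (again,
VIRTCHNL_MAX_NUM_PROTO_HDRS is a placeholder for the bound macro the
virtchnl definition actually provides):

	/* hypothetical bound macro; use the limit from virtchnl.h */
	if (layer >= VIRTCHNL_MAX_NUM_PROTO_HDRS) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Too many protocol layers in pattern.");
		return -rte_errno;
	}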
> + break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
> +
> +			if (ipv4_spec && ipv4_mask) {
> +				if (ipv4_mask->hdr.version_ihl ||
> +				    ipv4_mask->hdr.total_length ||
> +				    ipv4_mask->hdr.packet_id ||
> +				    ipv4_mask->hdr.fragment_offset ||
> +				    ipv4_mask->hdr.hdr_checksum) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv4 mask.");
> +					return -rte_errno;
> +				}
> +
> +				if (ipv4_mask->hdr.type_of_service == UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_TOS;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV4, DSCP);
> +				}
> +				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_PROTO;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV4, PROT);
> +				}
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV4_TTL;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV4, TTL);
> +				}
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_SRC;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV4, SRC);
> +				}
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> +					input_set |= IAVF_INSET_IPV4_DST;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV4, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
> +					   sizeof(ipv4_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> +
> +			if (ipv6_spec && ipv6_mask) {
> +				if (ipv6_mask->hdr.payload_len) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item, "Invalid IPv6 mask");
> +					return -rte_errno;
> +				}
> +
> +				if ((ipv6_mask->hdr.vtc_flow &
> +				     rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) ==
> +				    rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
> +					input_set |= IAVF_INSET_IPV6_TC;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV6, TC);
> +				}
> +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV6, PROT);
> +				}
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> +					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV6, HOP_LIMIT);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.src_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.src_addr))) {
> +					input_set |= IAVF_INSET_IPV6_SRC;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV6, SRC);
> +				}
> +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> +					input_set |= IAVF_INSET_IPV6_DST;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							IPV6, DST);
> +				}
> +
> +				rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
> +					   sizeof(ipv6_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
> +
> +			if (udp_spec && udp_mask) {
> +				if (udp_mask->hdr.dgram_len ||
> +				    udp_mask->hdr.dgram_cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid UDP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (udp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_UDP_SRC_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							UDP, SRC_PORT);
> +				}
> +				if (udp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_UDP_DST_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							UDP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						   &udp_spec->hdr,
> +						   sizeof(udp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						   &udp_spec->hdr,
> +						   sizeof(udp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
> +
> +			if (tcp_spec && tcp_mask) {
> +				if (tcp_mask->hdr.sent_seq ||
> +				    tcp_mask->hdr.recv_ack ||
> +				    tcp_mask->hdr.data_off ||
> +				    tcp_mask->hdr.tcp_flags ||
> +				    tcp_mask->hdr.rx_win ||
> +				    tcp_mask->hdr.cksum ||
> +				    tcp_mask->hdr.tcp_urp) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid TCP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_TCP_SRC_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							TCP, SRC_PORT);
> +				}
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_TCP_DST_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							TCP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						   &tcp_spec->hdr,
> +						   sizeof(tcp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						   &tcp_spec->hdr,
> +						   sizeof(tcp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +
> +			hdr = &filter->input.rule_cfg.proto_stack.proto_hdr[layer];
> +
> +			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
> +
> +			if (sctp_spec && sctp_mask) {
> +				if (sctp_mask->hdr.cksum) {
> +					rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM, item,
> +						"Invalid SCTP mask");
> +					return -rte_errno;
> +				}
> +
> +				if (sctp_mask->hdr.src_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_SCTP_SRC_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							SCTP, SRC_PORT);
> +				}
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
> +					input_set |= IAVF_INSET_SCTP_DST_PORT;
> +					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr,
> +							SCTP, DST_PORT);
> +				}
> +
> +				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
> +					rte_memcpy(hdr->buffer,
> +						   &sctp_spec->hdr,
> +						   sizeof(sctp_spec->hdr));
> +				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
> +					rte_memcpy(hdr->buffer,
> +						   &sctp_spec->hdr,
> +						   sizeof(sctp_spec->hdr));
> +			}
> +
> +			filter->input.rule_cfg.proto_stack.count = ++layer;
> +			break;
> +
> + case RTE_FLOW_ITEM_TYPE_VOID:
> + break;
> +
> + default:
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM, item,
> + "Invalid pattern item.");
> + return -rte_errno;
> + }
> + }
> +
> + filter->input_set = input_set;
> +
> + return 0;
> +}
> +
> +static int
> +iavf_fdir_parse(struct iavf_adapter *ad,
> + struct iavf_pattern_match_item *array,
> + uint32_t array_len,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + void **meta,
> + struct rte_flow_error *error)
> +{
> + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> + struct iavf_fdir_conf *filter = &vf->fdir.conf;
> + struct iavf_pattern_match_item *item = NULL;
> + uint64_t input_set;
> + int ret;
> +
> + memset(filter, 0, sizeof(*filter));
> +
> +	item = iavf_search_pattern_match_item(pattern, array, array_len,
> +					      error);
> + if (!item)
> + return -rte_errno;
> +
> + ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
> + if (ret)
> + goto error;
> +
> + input_set = filter->input_set;
> + if (!input_set || input_set & ~item->input_set_mask) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
> + "Invalid input set");
> + ret = -rte_errno;
> + goto error;
> + }
> +
> + ret = iavf_fdir_parse_action(ad, actions, error, filter);
> + if (ret)
> + goto error;
> +
> + if (meta)
> + *meta = filter;
> +
> +error:
> + rte_free(item);
> + return ret;
> +}
> +
> +static struct iavf_flow_parser iavf_fdir_parser = {
> + .engine = &iavf_fdir_engine,
> + .array = iavf_fdir_pattern,
> + .array_len = RTE_DIM(iavf_fdir_pattern),
> + .parse_pattern_action = iavf_fdir_parse,
> + .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
> +};
> +
> +RTE_INIT(iavf_fdir_engine_register)
> +{
> + iavf_register_flow_engine(&iavf_fdir_engine);
> +}
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index 11c70f5..77bfd1b 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -342,7 +342,8 @@
>
> 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
> 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
> -		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
> +		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
> +		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
>
> 	args.in_args = (uint8_t *)&caps;
> 	args.in_args_size = sizeof(caps);
> @@ -867,3 +868,128 @@
>
> return err;
> }
> +
> +int
> +iavf_fdir_add(struct iavf_adapter *adapter,
> + struct iavf_fdir_conf *filter)
> +{
> + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> + struct virtchnl_fdir_status *fdir_status;
> +
> + struct iavf_cmd_info args;
> + int err;
> +
> + filter->input.vsi_id = vf->vsi_res->vsi_id;
> + filter->input.validate_only = 0;
> +
> + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> + args.in_args = (uint8_t *)(&filter->input);
> +	args.in_args_size = sizeof(filter->input);
> + args.out_buffer = vf->aq_resp;
> + args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +	filter->flow_id = fdir_status->flow_id;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request is successfully done by PF");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request failed due to no hw resource");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request failed because the rule already exists");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request failed because the hw doesn't support it");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> +		PMD_DRV_LOG(INFO,
> +			"add rule request failed due to a programming timeout");
> +
> +	return 0;
> +}
> +
> +int
> +iavf_fdir_del(struct iavf_adapter *adapter,
> + struct iavf_fdir_conf *filter)
> +{
> + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> + struct virtchnl_fdir_status *fdir_status;
> +
> + struct iavf_cmd_info args;
> + int err;
> +
> + filter->input.vsi_id = vf->vsi_res->vsi_id;
> + filter->input.flow_id = filter->flow_id;
> +
> + args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
> + args.in_args = (uint8_t *)(&filter->input);
> + args.in_args_size = sizeof(filter->input);
> + args.out_buffer = vf->aq_resp;
> + args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request is successfully done by PF");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request failed because this rule doesn't exist");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
> +		PMD_DRV_LOG(INFO,
> +			"delete rule request failed due to a programming timeout");
> +
> +	return 0;
> +}
> +
> +int
> +iavf_fdir_check(struct iavf_adapter *adapter,
> + struct iavf_fdir_conf *filter)
> +{
> + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> + struct virtchnl_fdir_status *fdir_status;
> +
> + struct iavf_cmd_info args;
> + int err;
> +
> + filter->input.vsi_id = vf->vsi_res->vsi_id;
> + filter->input.validate_only = 1;
> +
> + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
> + args.in_args = (uint8_t *)(&filter->input);
> +	args.in_args_size = sizeof(filter->input);
> + args.out_buffer = vf->aq_resp;
> + args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to check flow director rule");
> +		return err;
> +	}
> +
> +	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
> +
> +	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
> +		PMD_DRV_LOG(INFO,
> +			"check rule request is successfully done by PF");
> +	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
> +		PMD_DRV_LOG(INFO,
> +			"check rule request failed due to parameter validation"
> +			" or unsupported hw");
> +
> +	return 0;
> +}
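
Note that validation deliberately reuses VIRTCHNL_OP_ADD_FDIR_FILTER:
iavf_fdir_check() sends the same message as iavf_fdir_add() but with
validate_only set to 1, so the PF only runs parameter checking and does
not program the filter into hardware.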
> diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> index 32eabca..ce71054 100644
> --- a/drivers/net/iavf/meson.build
> +++ b/drivers/net/iavf/meson.build
> @@ -13,6 +13,7 @@ sources = files(
> 'iavf_rxtx.c',
> 'iavf_vchnl.c',
> 'iavf_generic_flow.c',
> + 'iavf_fdir.c',
> )
>
> if arch_subdir == 'x86'
> --
> 1.8.3.1