From: Jun Yang <jun.y...@nxp.com>

The soft parser extracts the ECPRI header and message fields to specified
areas of the parser result. Flows are then classified according to these
ECPRI extracts taken from the parser result. This implementation supports
ECPRI over Ethernet/VLAN/UDP and the various type/message combinations.
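
As an illustration only (not part of this patch), such classification is
requested through the generic rte_flow API. The sketch below is a minimal,
hypothetical example: port 0, Rx queue 1 and PC ID 0x1234 are made-up
values, and only the IQ-data message type is shown.

  #include <rte_ethdev.h>
  #include <rte_flow.h>
  #include <rte_ecpri.h>
  #include <rte_byteorder.h>

  /* Hypothetical helper: create a rule on port 0 matching eCPRI IQ-data
   * messages with PC ID 0x1234 and steer them to Rx queue 1.
   * Port, queue and PC ID values are examples only.
   */
  static struct rte_flow *
  create_ecpri_iq_rule(struct rte_flow_error *err)
  {
  	struct rte_flow_item_ecpri ecpri_spec = {
  		.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
  		.hdr.type0.pc_id = RTE_BE16(0x1234),
  	};
  	struct rte_flow_item_ecpri ecpri_mask = {
  		.hdr.common.type = 0xff,
  		.hdr.type0.pc_id = RTE_BE16(0xffff),
  	};
  	struct rte_flow_item pattern[] = {
  		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
  		{ .type = RTE_FLOW_ITEM_TYPE_ECPRI,
  		  .spec = &ecpri_spec, .mask = &ecpri_mask },
  		{ .type = RTE_FLOW_ITEM_TYPE_END },
  	};
  	struct rte_flow_action_queue queue = { .index = 1 };
  	struct rte_flow_action actions[] = {
  		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
  		{ .type = RTE_FLOW_ACTION_TYPE_END },
  	};
  	struct rte_flow_attr attr = { .ingress = 1 };

  	return rte_flow_create(0, &attr, pattern, actions, err);
  }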
Signed-off-by: Jun Yang <jun.y...@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.h |  18 ++
 drivers/net/dpaa2/dpaa2_flow.c   | 348 ++++++++++++++++++++++++++++++-
 2 files changed, 365 insertions(+), 1 deletion(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index aeddcfdfa9..eaa653d266 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -179,6 +179,8 @@ enum dpaa2_rx_faf_offset {
 	FAFE_VXLAN_IN_IPV6_FRAM = 2,
 	FAFE_VXLAN_IN_UDP_FRAM = 3,
 	FAFE_VXLAN_IN_TCP_FRAM = 4,
+
+	FAFE_ECPRI_FRAM = 7,
 	/* Set by SP end*/
 
 	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
@@ -207,6 +209,17 @@ enum dpaa2_rx_faf_offset {
 	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
 };
 
+enum dpaa2_ecpri_fafe_type {
+	ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM),
+	ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1),
+	ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1),
+	ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1),
+	ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1),
+	ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1),
+	ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1),
+	ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1)
+};
+
 #define DPAA2_PR_ETH_OFF_OFFSET 19
 #define DPAA2_PR_TCI_OFF_OFFSET 21
 #define DPAA2_PR_LAST_ETYPE_OFFSET 23
@@ -236,6 +249,11 @@ enum dpaa2_rx_faf_offset {
 #define DPAA2_VXLAN_IN_TYPE_OFFSET 46
 /* Set by SP for vxlan distribution end*/
 
+/* ECPRI shares SP context with VXLAN*/
+#define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET
+
+#define DPAA2_ECPRI_MAX_EXTRACT_NB 8
+
 struct ipv4_sd_addr_extract_rule {
 	uint32_t ipv4_src;
 	uint32_t ipv4_dst;
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index e4d7117192..e4fffdbf33 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -156,6 +156,13 @@ static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
 	.flags = 0xff,
 	.vni = "\xff\xff\xff",
 };
+
+static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = {
+	.hdr.common.type = 0xff,
+	.hdr.dummy[0] = RTE_BE32(0xffffffff),
+	.hdr.dummy[1] = RTE_BE32(0xffffffff),
+	.hdr.dummy[2] = RTE_BE32(0xffffffff),
+};
 #endif
 
 #define DPAA2_FLOW_DUMP printf
@@ -1556,6 +1563,10 @@ dpaa2_flow_extract_support(const uint8_t *mask_src,
 		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
 		size = sizeof(struct rte_flow_item_vxlan);
 		break;
+	case RTE_FLOW_ITEM_TYPE_ECPRI:
+		mask_support = (const char *)&dpaa2_flow_item_ecpri_mask;
+		size = sizeof(struct rte_flow_item_ecpri);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3238,6 +3249,330 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	return 0;
 }
 
+static int
+dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
+	const struct rte_flow_action actions[] __rte_unused,
+	struct rte_flow_error *error __rte_unused,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_ecpri *spec, *mask;
+	struct rte_flow_item_ecpri local_mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+	uint8_t extract_nb = 0, i;
+	uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB];
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	if (pattern->mask) {
+		memcpy(&local_mask, pattern->mask,
+			sizeof(struct rte_flow_item_ecpri));
+		local_mask.hdr.common.u32 =
+			rte_be_to_cpu_32(local_mask.hdr.common.u32);
+		mask = &local_mask;
+	} else {
+		mask = &dpaa2_flow_item_ecpri_mask;
+	}
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-ECPRI distribution not support");
+		return -ENOTSUP;
+	}
+
+	if (!spec) {
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_ECPRI)) {
+		DPAA2_PMD_WARN("Extract field(s) of ECPRI not support.");
+
+		return -1;
+	}
+
+	if (mask->hdr.common.type != 0xff) {
+		DPAA2_PMD_WARN("ECPRI header type not specified.");
+
+		return -1;
+	}
+
+	if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_0;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type0.pc_id) {
+			rule_data[extract_nb] = spec->hdr.type0.pc_id;
+			mask_data[extract_nb] = mask->hdr.type0.pc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_iq_data, pc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type0.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type0.seq_id;
+			mask_data[extract_nb] = mask->hdr.type0.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_iq_data, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_1;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type1.pc_id) {
+			rule_data[extract_nb] = spec->hdr.type1.pc_id;
+			mask_data[extract_nb] = mask->hdr.type1.pc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_bit_seq, pc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type1.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type1.seq_id;
+			mask_data[extract_nb] = mask->hdr.type1.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_bit_seq, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_2;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type2.rtc_id) {
+			rule_data[extract_nb] = spec->hdr.type2.rtc_id;
+			mask_data[extract_nb] = mask->hdr.type2.rtc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type2.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type2.seq_id;
+			mask_data[extract_nb] = mask->hdr.type2.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_3;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id)
+			DPAA2_PMD_WARN("Extract type3 msg not support.");
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_4;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type4.rma_id) {
+			rule_data[extract_nb] = spec->hdr.type4.rma_id;
+			mask_data[extract_nb] = mask->hdr.type4.rma_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET + 0;
+			/** Compiler not support to take address
+			 * of bit-field
+			 * offsetof(struct rte_ecpri_msg_rm_access,
+			 * rma_id);
+			 */
+			extract_nb++;
+		}
+		if (mask->hdr.type4.ele_id) {
+			rule_data[extract_nb] = spec->hdr.type4.ele_id;
+			mask_data[extract_nb] = mask->hdr.type4.ele_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET + 2;
+			/** Compiler not support to take address
+			 * of bit-field
+			 * offsetof(struct rte_ecpri_msg_rm_access,
+			 * ele_id);
+			 */
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_5;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type5.msr_id) {
+			rule_data[extract_nb] = spec->hdr.type5.msr_id;
+			mask_data[extract_nb] = mask->hdr.type5.msr_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_delay_measure,
+				msr_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type5.act_type) {
+			rule_data[extract_nb] = spec->hdr.type5.act_type;
+			mask_data[extract_nb] = mask->hdr.type5.act_type;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_delay_measure,
+				act_type);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_6;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type6.rst_id) {
+			rule_data[extract_nb] = spec->hdr.type6.rst_id;
+			mask_data[extract_nb] = mask->hdr.type6.rst_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_remote_reset,
+				rst_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type6.rst_op) {
+			rule_data[extract_nb] = spec->hdr.type6.rst_op;
+			mask_data[extract_nb] = mask->hdr.type6.rst_op;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_remote_reset,
+				rst_op);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_7;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type7.evt_id) {
+			rule_data[extract_nb] = spec->hdr.type7.evt_id;
+			mask_data[extract_nb] = mask->hdr.type7.evt_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+				evt_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.evt_type) {
+			rule_data[extract_nb] = spec->hdr.type7.evt_type;
+			mask_data[extract_nb] = mask->hdr.type7.evt_type;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+				evt_type);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.seq) {
+			rule_data[extract_nb] = spec->hdr.type7.seq;
+			mask_data[extract_nb] = mask->hdr.type7.seq;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+				seq);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.number) {
+			rule_data[extract_nb] = spec->hdr.type7.number;
+			mask_data[extract_nb] = mask->hdr.type7.number;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+				number);
+			extract_nb++;
+		}
+	} else {
+		DPAA2_PMD_ERR("Invalid ecpri header type(%d)",
+			spec->hdr.common.type);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < extract_nb; i++) {
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			extract_off[i],
+			extract_size[i], &rule_data[i], &mask_data[i],
+			priv, group,
+			device_configured,
+			DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			extract_off[i],
+			extract_size[i], &rule_data[i], &mask_data[i],
+			priv, group,
+			device_configured,
+			DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
 static int
 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
@@ -3870,6 +4205,16 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 				goto end_flow_set;
 			}
 			break;
+		case RTE_FLOW_ITEM_TYPE_ECPRI:
+			ret = dpaa2_configure_flow_ecpri(flow,
+					dev, attr, &dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("ECPRI flow config failed!");
+				goto end_flow_set;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
 			ret = dpaa2_configure_flow_raw(flow,
 					dev, attr, &dpaa2_pattern[i],
@@ -3884,7 +4229,8 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			end_of_list = 1;
 			break; /*End of List*/
 		default:
-			DPAA2_PMD_ERR("Invalid action type");
+			DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)",
+				i, pattern[i].type);
 			ret = -ENOTSUP;
 			break;
 		}
-- 
2.25.1