Add routines to perform pre and post processing for downlink entities.

Pre processing derives the COUNT value from the sequence number carried in the
received PDU header, adjusting the HFN when the SN falls outside the reception
window, and prepares the crypto operation for decryption and, when configured,
integrity verification. Post processing checks for operations flagged as
failed, updates the entity state (RX_DELIV, RX_NEXT) and strips the PDCP
header and, for integrity protected PDUs, the trailing MAC-I.
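
For reference, a minimal downlink datapath sketch; dl_burst() and its
parameters are illustrative only, and the rte_pdcp_pkt_pre_process() /
rte_pdcp_pkt_post_process() wrappers are assumed to be available from
rte_pdcp.h:

  #include <rte_crypto.h>
  #include <rte_cryptodev.h>
  #include <rte_mbuf.h>
  #include <rte_pdcp.h>

  static uint16_t
  dl_burst(const struct rte_pdcp_entity *ent, uint8_t dev_id, uint16_t qp_id,
           struct rte_mbuf *in[], struct rte_mbuf *out[], uint16_t nb)
  {
          struct rte_crypto_op *cop[nb];
          struct rte_mbuf *dec[nb];
          uint16_t nb_prep, nb_deq, nb_err, i;

          /* Derive COUNT from the PDU SN and build crypto ops (this patch). */
          nb_prep = rte_pdcp_pkt_pre_process(ent, in, cop, nb, &nb_err);

          /* Run decryption/verification; a real datapath would poll until all
           * enqueued operations complete and handle partial enqueues.
           */
          nb_prep = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, nb_prep);
          nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, nb_prep);

          /* Recover the mbufs carried by the completed operations. Marking of
           * failed operations and crypto op release are omitted for brevity.
           */
          for (i = 0; i < nb_deq; i++)
                  dec[i] = cop[i]->sym->m_src;

          /* Update RX_DELIV/RX_NEXT, strip header and MAC-I (this patch). */
          return rte_pdcp_pkt_post_process(ent, dec, out, nb_deq, &nb_err);
  }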

Signed-off-by: Anoob Joseph <ano...@marvell.com>
Signed-off-by: Kiran Kumar K <kirankum...@marvell.com>
Signed-off-by: Volodymyr Fialko <vfia...@marvell.com>
---
 lib/pdcp/pdcp_entity.h  |   2 +
 lib/pdcp/pdcp_process.c | 453 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 455 insertions(+)

diff --git a/lib/pdcp/pdcp_entity.h b/lib/pdcp/pdcp_entity.h
index 46cdaead09..d2d9bbe149 100644
--- a/lib/pdcp/pdcp_entity.h
+++ b/lib/pdcp/pdcp_entity.h
@@ -13,6 +13,8 @@
 
 struct entity_priv;
 
+#define PDCP_HFN_MIN 0
+
 /* IV generation function based on the entity configuration */
 typedef void (*iv_gen_t)(struct rte_crypto_op *cop, const struct entity_priv *en_priv,
                         uint32_t count);
diff --git a/lib/pdcp/pdcp_process.c b/lib/pdcp/pdcp_process.c
index 7c1fc85fcb..79d6ca352a 100644
--- a/lib/pdcp/pdcp_process.c
+++ b/lib/pdcp/pdcp_process.c
@@ -329,9 +329,423 @@ pdcp_post_process_ul(const struct rte_pdcp_entity *entity,
        return nb_success;
 }
 
+static inline int
+pdcp_sn_count_get(const uint32_t rx_deliv, int32_t rsn, uint32_t *count,
+                 const enum rte_security_pdcp_sn_size sn_size)
+{
+       const uint32_t rx_deliv_sn = pdcp_sn_from_count_get(rx_deliv, sn_size);
+       const uint32_t window_sz = pdcp_window_size_get(sn_size);
+       uint32_t rhfn;
+
+       rhfn = pdcp_hfn_from_count_get(rx_deliv, sn_size);
+
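+       /* Per the TS 38.323 receive operation, a received SN more than a window
+        * below RX_DELIV's SN maps to the next HFN, one a window or more above
+        * maps to the previous HFN, and anything in between keeps RX_DELIV's HFN.
+        */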
+       if (rsn < (int32_t)(rx_deliv_sn - window_sz)) {
+               if (unlikely(rhfn == pdcp_hfn_max(sn_size)))
+                       return -ERANGE;
+               rhfn += 1;
+       } else if ((uint32_t)rsn >= (rx_deliv_sn + window_sz)) {
+               if (unlikely(rhfn == PDCP_HFN_MIN))
+                       return -ERANGE;
+               rhfn -= 1;
+       }
+
+       *count = pdcp_count_from_hfn_sn_get(rhfn, rsn, sn_size);
+
+       return 0;
+}
+
+static inline uint16_t
+pdcp_pre_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
+                                      struct rte_mbuf *in_mb[], struct rte_crypto_op *cop[],
+                                      uint16_t num, uint16_t *nb_err_ret,
+                                      const bool is_integ_protected)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
+       uint16_t nb_cop, nb_prep = 0, nb_err = 0;
+       struct rte_mbuf *mb;
+       int32_t rsn = 0;
+       uint32_t count;
+       int i;
+
+       const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+       nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+                                         num);
+
+       const uint32_t rx_deliv = en_priv->state.rx_deliv;
+
+       for (i = 0; i < nb_cop; i++) {
+               mb = in_mb[i];
+               pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *);
+
+               /* Check for PDU type */
+               if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA))
+                       rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+               else
+                       rte_panic("TODO: Control PDU not handled");
+
+               if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_12))) {
+                       in_mb[nb_err++] = mb;
+                       continue;
+               }
+               cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, is_integ_protected);
+       }
+
+       if (unlikely(nb_err))
+               /* Using mempool API since crypto API is not providing bulk free */
+               rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);
+
+       *nb_err_ret = num - nb_prep;
+
+       return nb_prep;
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+                                   struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+                                struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_pre_process_uplane_sn_12_dl_flags(entity, mb, cop, num, nb_err, false);
+}
+
+static inline uint16_t
+pdcp_pre_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
+                                      struct rte_mbuf *in_mb[], struct rte_crypto_op *cop[],
+                                      uint16_t num, uint16_t *nb_err_ret,
+                                      const bool is_integ_protected)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
+       uint16_t nb_cop, nb_prep = 0, nb_err = 0;
+       struct rte_mbuf *mb;
+       int32_t rsn = 0;
+       uint32_t count;
+       int i;
+
+       const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+       nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+                                         num);
+
+       const uint32_t rx_deliv = en_priv->state.rx_deliv;
+
+       for (i = 0; i < nb_cop; i++) {
+               mb = in_mb[i];
+               pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *);
+
+               /* Check for PDU type */
+               if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA))
+                       rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
+                              (pdu_hdr->sn_7_0));
+               else
+                       rte_panic("TODO: Control PDU not handled");
+
+               if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_18))) {
+                       in_mb[nb_err++] = mb;
+                       continue;
+               }
+               cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, is_integ_protected);
+       }
+
+       if (unlikely(nb_err))
+               /* Using mempool API since crypto API is not providing bulk free */
+               rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);
+
+       *nb_err_ret = num - nb_prep;
+
+       return nb_prep;
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+                                   struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_pre_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *mb[],
+                                struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_pre_process_uplane_sn_18_dl_flags(entity, mb, cop, num, nb_err, false);
+}
+
+static uint16_t
+pdcp_pre_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity, struct rte_mbuf *in_mb[],
+                                struct rte_crypto_op *cop[], uint16_t num, uint16_t *nb_err_ret)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
+       uint16_t nb_cop, nb_prep = 0, nb_err = 0;
+       struct rte_mbuf *mb;
+       uint32_t count;
+       int32_t rsn;
+       int i;
+
+       const uint8_t data_offset = en_priv->hdr_sz + en_priv->aad_sz;
+
+       nb_cop = rte_crypto_op_bulk_alloc(en_priv->cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC, cop,
+                                         num);
+
+       const uint32_t rx_deliv = en_priv->state.rx_deliv;
+
+       for (i = 0; i < nb_cop; i++) {
+               mb = in_mb[i];
+               pdu_hdr = rte_pktmbuf_mtod(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *);
+               rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+               if (unlikely(pdcp_sn_count_get(rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_12))) {
+                       in_mb[nb_err++] = mb;
+                       continue;
+               }
+               cop_prepare(en_priv, mb, cop[nb_prep++], data_offset, count, true);
+       }
+
+       if (unlikely(nb_err))
+               /* Using mempool API since crypto API is not providing bulk free */
+               rte_mempool_put_bulk(en_priv->cop_pool, (void *)&cop[nb_prep], nb_cop - nb_prep);
+
+       *nb_err_ret = num - nb_prep;
+
+       return nb_prep;
+}
+
+static inline void
+pdcp_packet_strip(struct rte_mbuf *mb, const uint32_t hdr_trim_sz, const bool trim_mac)
+{
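+       /* Remove the PDCP header (and any preceding AAD area) from the head of
+        * the packet and, when trim_mac is set, the MAC-I from the tail.
+        */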
+       char *p = rte_pktmbuf_adj(mb, hdr_trim_sz);
+       RTE_VERIFY(p != NULL);
+
+       if (trim_mac) {
+               int ret = rte_pktmbuf_trim(mb, PDCP_MAC_I_LEN);
+               RTE_VERIFY(ret == 0);
+       }
+}
+
+static inline bool
+pdcp_post_process_update_entity_state(const struct rte_pdcp_entity *entity,
+                                     const uint32_t count)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+
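+       /* A PDU with COUNT below RX_DELIV was already delivered or arrived too
+        * late; report it as an error.
+        */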
+       if (count < en_priv->state.rx_deliv)
+               return false;
+
+       /* t-Reordering timer is not supported - SDU will be delivered immediately.
+        * Update RX_DELIV to the COUNT value of the first PDCP SDU which has not
+        * been delivered to upper layers
+        */
+       en_priv->state.rx_deliv = count + 1;
+
+       if (count >= en_priv->state.rx_next)
+               en_priv->state.rx_next = count + 1;
+
+       return true;
+}
+
+static inline uint16_t
+pdcp_post_process_uplane_sn_12_dl_flags(const struct rte_pdcp_entity *entity,
+                                       struct rte_mbuf *in_mb[],
+                                       struct rte_mbuf *out_mb[],
+                                       uint16_t num, uint16_t *nb_err_ret,
+                                       const bool is_integ_protected)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_up_data_pdu_sn_12_hdr *pdu_hdr;
+       int i, nb_success = 0, nb_err = 0, rsn = 0;
+       const uint32_t aad_sz = en_priv->aad_sz;
+       struct rte_mbuf *err_mb[num];
+       struct rte_mbuf *mb;
+       uint32_t count;
+
+       const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+       for (i = 0; i < num; i++) {
+               mb = in_mb[i];
+               if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+                       goto error;
+               pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_12_hdr *,
+                                                 aad_sz);
+
+               /* Check for PDU type */
+               if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA))
+                       rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+               else
+                       rte_panic("Control PDU should not be received");
+
+               if (unlikely(pdcp_sn_count_get(en_priv->state.rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_12)))
+                       goto error;
+
+               if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+                       goto error;
+
+               pdcp_packet_strip(mb, hdr_trim_sz, is_integ_protected);
+               out_mb[nb_success++] = mb;
+               continue;
+
+error:
+               err_mb[nb_err++] = mb;
+       }
+
+       if (unlikely(nb_err != 0))
+               rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+       *nb_err_ret = nb_err;
+       return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_12_dl_ip(const struct rte_pdcp_entity *entity,
+                                    struct rte_mbuf *in_mb[],
+                                    struct rte_mbuf *out_mb[],
+                                    uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_12_dl(const struct rte_pdcp_entity *entity,
+                                 struct rte_mbuf *in_mb[],
+                                 struct rte_mbuf *out_mb[],
+                                 uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_post_process_uplane_sn_12_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
+}
+
+static inline uint16_t
+pdcp_post_process_uplane_sn_18_dl_flags(const struct rte_pdcp_entity *entity,
+                                       struct rte_mbuf *in_mb[],
+                                       struct rte_mbuf *out_mb[],
+                                       uint16_t num, uint16_t *nb_err_ret,
+                                       const bool is_integ_protected)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_up_data_pdu_sn_18_hdr *pdu_hdr;
+       const uint32_t aad_sz = en_priv->aad_sz;
+       int i, nb_success = 0, nb_err = 0;
+       struct rte_mbuf *mb, *err_mb[num];
+       int32_t rsn = 0;
+       uint32_t count;
+
+       const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+       for (i = 0; i < num; i++) {
+               mb = in_mb[i];
+               if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+                       goto error;
+
+               pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_up_data_pdu_sn_18_hdr *,
+                                                 aad_sz);
+
+               /* Check for PDU type */
+               if (likely(pdu_hdr->d_c == RTE_PDCP_PDU_TYPE_DATA))
+                       rsn = ((pdu_hdr->sn_17_16 << 16) | (pdu_hdr->sn_15_8 << 8) |
+                              (pdu_hdr->sn_7_0));
+               else
+                       rte_panic("Control PDU should not be received");
+
+               if (unlikely(pdcp_sn_count_get(en_priv->state.rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_18)))
+                       goto error;
+
+               if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+                       goto error;
+
+               pdcp_packet_strip(mb, hdr_trim_sz, is_integ_protected);
+               out_mb[nb_success++] = mb;
+               continue;
+
+error:
+               err_mb[nb_err++] = mb;
+       }
+
+       if (unlikely(nb_err != 0))
+               rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+       *nb_err_ret = nb_err;
+       return nb_success;
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_18_dl_ip(const struct rte_pdcp_entity *entity,
+                                    struct rte_mbuf *in_mb[],
+                                    struct rte_mbuf *out_mb[],
+                                    uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, true);
+}
+
+static uint16_t
+pdcp_post_process_uplane_sn_18_dl(const struct rte_pdcp_entity *entity,
+                                 struct rte_mbuf *in_mb[],
+                                 struct rte_mbuf *out_mb[],
+                                 uint16_t num, uint16_t *nb_err)
+{
+       return pdcp_post_process_uplane_sn_18_dl_flags(entity, in_mb, out_mb, num, nb_err, false);
+}
+
+static uint16_t
+pdcp_post_process_cplane_sn_12_dl(const struct rte_pdcp_entity *entity,
+                                 struct rte_mbuf *in_mb[],
+                                 struct rte_mbuf *out_mb[],
+                                 uint16_t num, uint16_t *nb_err_ret)
+{
+       struct entity_priv *en_priv = entity_priv_get(entity);
+       struct rte_pdcp_cp_data_pdu_sn_12_hdr *pdu_hdr;
+       const uint32_t aad_sz = en_priv->aad_sz;
+       int i, nb_success = 0, nb_err = 0;
+       struct rte_mbuf *err_mb[num];
+       struct rte_mbuf *mb;
+       uint32_t count;
+       int32_t rsn;
+
+       const uint32_t hdr_trim_sz = en_priv->hdr_sz + aad_sz;
+
+       for (i = 0; i < num; i++) {
+               mb = in_mb[i];
+               if (unlikely(mb->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED))
+                       goto error;
+
+               pdu_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_pdcp_cp_data_pdu_sn_12_hdr *,
+                                                 aad_sz);
+               rsn = ((pdu_hdr->sn_11_8 << 8) | (pdu_hdr->sn_7_0));
+
+               if (unlikely(pdcp_sn_count_get(en_priv->state.rx_deliv, rsn, &count,
+                                              RTE_SECURITY_PDCP_SN_SIZE_12)))
+                       goto error;
+
+               if (unlikely(!pdcp_post_process_update_entity_state(entity, count)))
+                       goto error;
+
+               pdcp_packet_strip(mb, hdr_trim_sz, true);
+
+               out_mb[nb_success++] = mb;
+               continue;
+
+error:
+               err_mb[nb_err++] = mb;
+       }
+
+       if (unlikely(nb_err != 0))
+               rte_memcpy(&out_mb[nb_success], err_mb, nb_err * sizeof(struct rte_mbuf *));
+
+       *nb_err_ret = nb_err;
+       return nb_success;
+}
+
 static int
 pdcp_pre_post_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_entity_conf *conf)
 {
+       struct entity_priv *en_priv = entity_priv_get(entity);
+
        entity->pre_process = NULL;
        entity->post_process = NULL;
 
@@ -342,6 +756,13 @@ pdcp_pre_post_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_ent
                entity->post_process = pdcp_post_process_ul;
        }
 
+       if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_CONTROL) &&
+           (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+           (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK)) {
+               entity->pre_process = pdcp_pre_process_cplane_sn_12_dl;
+               entity->post_process = pdcp_post_process_cplane_sn_12_dl;
+       }
+
        if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
            (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
            (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_UPLINK)) {
@@ -356,6 +777,38 @@ pdcp_pre_post_func_set(struct rte_pdcp_entity *entity, const struct rte_pdcp_ent
                entity->post_process = pdcp_post_process_ul;
        }
 
+       if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+           (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+           (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+           (en_priv->flags.is_authenticated)) {
+               entity->pre_process = pdcp_pre_process_uplane_sn_12_dl_ip;
+               entity->post_process = pdcp_post_process_uplane_sn_12_dl_ip;
+       }
+
+       if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+           (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_12) &&
+           (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+           (!en_priv->flags.is_authenticated)) {
+               entity->pre_process = pdcp_pre_process_uplane_sn_12_dl;
+               entity->post_process = pdcp_post_process_uplane_sn_12_dl;
+       }
+
+       if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+           (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
+           (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+           (en_priv->flags.is_authenticated)) {
+               entity->pre_process = pdcp_pre_process_uplane_sn_18_dl_ip;
+               entity->post_process = pdcp_post_process_uplane_sn_18_dl_ip;
+       }
+
+       if ((conf->pdcp_xfrm.domain == RTE_SECURITY_PDCP_MODE_DATA) &&
+           (conf->pdcp_xfrm.sn_size == RTE_SECURITY_PDCP_SN_SIZE_18) &&
+           (conf->pdcp_xfrm.pkt_dir == RTE_SECURITY_PDCP_DOWNLINK) &&
+           (!en_priv->flags.is_authenticated)) {
+               entity->pre_process = pdcp_pre_process_uplane_sn_18_dl;
+               entity->post_process = pdcp_post_process_uplane_sn_18_dl;
+       }
+
        if (entity->pre_process == NULL || entity->post_process == NULL)
                return -ENOTSUP;
 
-- 
2.25.1
