On 12/20/2018 7:36 PM, Ananyev, Konstantin wrote:
>
>>> diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
>>> new file mode 100644
>>> index 000000000..f927a82bf
>>> --- /dev/null
>>> +++ b/lib/librte_ipsec/sa.c
>>> @@ -0,0 +1,327 @@
>>> +/* SPDX-License-Identifier: BSD-3-Clause
>>> + * Copyright(c) 2018 Intel Corporation
>>> + */
>>> +
>>> +#include <rte_ipsec_sa.h>
>>> +#include <rte_esp.h>
>>> +#include <rte_ip.h>
>>> +#include <rte_errno.h>
>>> +
>>> +#include "sa.h"
>>> +#include "ipsec_sqn.h"
>>> +
>>> +/* some helper structures */
>>> +struct crypto_xform {
>>> +   struct rte_crypto_auth_xform *auth;
>>> +   struct rte_crypto_cipher_xform *cipher;
>>> +   struct rte_crypto_aead_xform *aead;
>>> +};
>> shouldn't this be a union, as aead cannot be used together with the
>> cipher and auth cases?
> That's used internally to collect/analyze xforms provided by 
> prm->crypto_xform.

>
>
>> extra line
>>> +
>>> +
>>> +static int
>>> +check_crypto_xform(struct crypto_xform *xform)
>>> +{
>>> +   uintptr_t p;
>>> +
>>> +   p = (uintptr_t)xform->auth | (uintptr_t)xform->cipher;
>> what is the intent of this?
> It is used below to check that if aead is present, neither cipher nor
> auth is.
>
>>> +
>>> +   /* either aead or both auth and cipher should be not NULLs */
>>> +   if (xform->aead) {
>>> +           if (p)
>>> +                   return -EINVAL;
>>> +   } else if (p == (uintptr_t)xform->auth) {
>>> +           return -EINVAL;
>>> +   }
>> This function does not look good. It will miss the cipher-only case.
> Cipher-only is not supported right now and I am not aware of any plans
> to support it in the future.
> If someone would like to add cipher-only, then yes, he/she would
> probably have to update this function.
I know that cipher-only is not supported and nobody will support it in 
the case of ipsec.
My point is that if somebody gives only an auth or only a cipher xform, 
this function would not be able to detect that case and will not return 
an error.
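
For illustration, here is a minimal sketch of a stricter check that
rejects auth-only and cipher-only explicitly (hypothetical code, not
part of the patch):

static int
check_crypto_xform(struct crypto_xform *xform)
{
	/* AEAD is mutually exclusive with auth and cipher */
	if (xform->aead != NULL) {
		if (xform->auth != NULL || xform->cipher != NULL)
			return -EINVAL;
	/* otherwise both auth and cipher must be present */
	} else if (xform->auth == NULL || xform->cipher == NULL)
		return -EINVAL;

	return 0;
}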

>>> +
>>> +   return 0;
>>> +}
>>> +
>>> +static int
>>> +fill_crypto_xform(struct crypto_xform *xform,
>>> +   const struct rte_ipsec_sa_prm *prm)
>>> +{
>>> +   struct rte_crypto_sym_xform *xf;
>>> +
>>> +   memset(xform, 0, sizeof(*xform));
>>> +
>>> +   for (xf = prm->crypto_xform; xf != NULL; xf = xf->next) {
>>> +           if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
>>> +                   if (xform->auth != NULL)
>>> +                           return -EINVAL;
>>> +                   xform->auth = &xf->auth;
>>> +           } else if (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
>>> +                   if (xform->cipher != NULL)
>>> +                           return -EINVAL;
>>> +                   xform->cipher = &xf->cipher;
>>> +           } else if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
>>> +                   if (xform->aead != NULL)
>>> +                           return -EINVAL;
>>> +                   xform->aead = &xf->aead;
>>> +           } else
>>> +                   return -EINVAL;
>>> +   }
>>> +
>>> +   return check_crypto_xform(xform);
>>> +}
>> How is this function handling the inbound and outbound cases?
>> For inbound, the first xform is auth and then cipher.
>> For outbound, the first is cipher and then auth. I think this should be
>> checked in the lib.
> Interesting, I didn't know about such a limitation.
> My understanding was that any order (<auth,cipher>, <cipher,auth>)
> for both inbound and outbound is acceptable.
> Is that order restriction documented somewhere?
/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required, multiple transforms
 * can be chained together to specify a chain transforms such as authentication
 * then cipher, or cipher then authentication. Each transform structure can
 * hold a single transform, the type field is used to specify which transform
 * is contained within the union
 */
struct rte_crypto_sym_xform {

This is not a limitation; this is how it is designed to handle the 2 
cases of crypto: auth then cipher, and cipher then auth. A sketch of how 
the lib could enforce that ordering follows below.
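
A hypothetical helper that would enforce the order in the lib (not part
of the patch; it assumes the direction comes from rte_security's
RTE_SECURITY_IPSEC_SA_DIR_* values in prm->ipsec_xform):

static int
check_xform_order(const struct rte_ipsec_sa_prm *prm)
{
	const struct rte_crypto_sym_xform *xf = prm->crypto_xform;

	if (xf == NULL)
		return -EINVAL;

	/* a single AEAD xform is valid for both directions */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return (xf->next == NULL) ? 0 : -EINVAL;

	/* otherwise expect a chain of exactly two xforms */
	if (xf->next == NULL || xf->next->next != NULL)
		return -EINVAL;

	/* inbound: auth then cipher */
	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		return (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xf->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) ?
			0 : -EINVAL;

	/* outbound: cipher then auth */
	return (xf->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		xf->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) ? 0 : -EINVAL;
}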


>> Here the for loop should not be needed, as there would be at most 2 xforms.
>>> +
>>> +uint64_t __rte_experimental
>>> +rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
>>> +{
>>> +   return sa->type;
>>> +}
>>> +
>>> +static int32_t
>>> +ipsec_sa_size(uint32_t wsz, uint64_t type, uint32_t *nb_bucket)
>>> +{
>>> +   uint32_t n, sz;
>>> +
>>> +   n = 0;
>>> +   if (wsz != 0 && (type & RTE_IPSEC_SATP_DIR_MASK) ==
>>> +                   RTE_IPSEC_SATP_DIR_IB)
>>> +           n = replay_num_bucket(wsz);
>>> +
>>> +   if (n > WINDOW_BUCKET_MAX)
>>> +           return -EINVAL;
>>> +
>>> +   *nb_bucket = n;
>>> +
>>> +   sz = rsn_size(n);
>>> +   sz += sizeof(struct rte_ipsec_sa);
>>> +   return sz;
>>> +}
>>> +
>>> +void __rte_experimental
>>> +rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
>>> +{
>>> +   memset(sa, 0, sa->size);
>>> +}
>> Where is the memory of "sa" getting initialized?
> Not sure I understand your question...
> Do you mean we missed memset(sa, 0, size)
> in rte_ipsec_sa_init()?
Sorry, I did not ask the correct question. I was asking: where is it 
allocated?
Is it the application's responsibility?
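
If so, the intended usage presumably looks something like the sketch
below (hypothetical application code; it assumes the library exposes
rte_ipsec_sa_size() to query the required buffer size):

#include <rte_malloc.h>
#include <rte_ipsec_sa.h>

static struct rte_ipsec_sa *
app_create_sa(const struct rte_ipsec_sa_prm *prm)
{
	int32_t sz;
	struct rte_ipsec_sa *sa;

	/* ask the library how much memory this SA needs */
	sz = rte_ipsec_sa_size(prm);
	if (sz < 0)
		return NULL;

	/* the allocation itself is owned by the application */
	sa = rte_zmalloc("ipsec_sa", sz, RTE_CACHE_LINE_SIZE);
	if (sa == NULL)
		return NULL;

	if (rte_ipsec_sa_init(sa, prm, sz) < 0) {
		rte_free(sa);
		return NULL;
	}
	return sa;
}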
>
>
>>> +
>>> +int __rte_experimental
>>> +rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
>>> +   uint32_t size)
>>> +{
>>> +   int32_t rc, sz;
>>> +   uint32_t nb;
>>> +   uint64_t type;
>>> +   struct crypto_xform cxf;
>>> +
>>> +   if (sa == NULL || prm == NULL)
>>> +           return -EINVAL;
>>> +
>>> +   /* determine SA type */
>>> +   rc = fill_sa_type(prm, &type);
>>> +   if (rc != 0)
>>> +           return rc;
>>> +
>>> +   /* determine required size */
>>> +   sz = ipsec_sa_size(prm->replay_win_sz, type, &nb);
>>> +   if (sz < 0)
>>> +           return sz;
>>> +   else if (size < (uint32_t)sz)
>>> +           return -ENOSPC;
>>> +
>>> +   /* only esp is supported right now */
>>> +   if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
>>> +           return -EINVAL;
>>> +
>>> +   if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
>>> +                   prm->tun.hdr_len > sizeof(sa->hdr))
>>> +           return -EINVAL;
>>> +
>>> +   rc = fill_crypto_xform(&cxf, prm);
>>> +   if (rc != 0)
>>> +           return rc;
>>> +
>>> +   sa->type = type;
>>> +   sa->size = sz;
>>> +
>>> +   /* check for ESN flag */
>>> +   sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
>>> +           UINT32_MAX : UINT64_MAX;
>>> +
>>> +   rc = esp_sa_init(sa, prm, &cxf);
>>> +   if (rc != 0)
>>> +           rte_ipsec_sa_fini(sa);
>>> +
>>> +   /* fill replay window related fields */
>>> +   if (nb != 0) {
>> Move this to where nb is getting updated.
> I don't think it is a good idea.
> We calculate nb and the required sa size first, without updating the
> provided memory buffer.
> If the buffer is not big enough, we will return an error without
> updating the buffer.
> It is cleaner and safer to keep it as it is.
ok
>>> +           sa->replay.win_sz = prm->replay_win_sz;
>>> +           sa->replay.nb_bucket = nb;
>>> +           sa->replay.bucket_index_mask = sa->replay.nb_bucket - 1;
>>> +           sa->sqn.inb = (struct replay_sqn *)(sa + 1);
>>> +   }
>>> +
>>> +   return sz;
>>> +}
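
For context on the replay fields assigned above, a hedged sketch of how
they are typically consumed (names hypothetical; it assumes the window
is a bitmap of 64-bit buckets, and nb_bucket being a power of 2 is what
makes bucket_index_mask = nb_bucket - 1 valid):

#define BUCKET_BITS_LOG2	6	/* 64 bits per bucket */

static inline void
replay_bit_pos(const struct rte_ipsec_sa *sa, uint64_t sqn,
	uint32_t *bucket, uint32_t *bit)
{
	/* select the bucket, wrapping around the power-of-2 array */
	*bucket = (uint32_t)(sqn >> BUCKET_BITS_LOG2) &
		sa->replay.bucket_index_mask;
	/* bit offset of this sequence number inside the bucket */
	*bit = (uint32_t)(sqn & ((1 << BUCKET_BITS_LOG2) - 1));
}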
