Hi Tomasz,

I can see a compilation failure in the patchwork CI tests.
Could you please check?

Thanks.

On 3/7/2019 5:32 PM, Tomasz Jozwiak wrote:
> This patch adds dynamic SGL allocation instead of a static one.
> The number of elements in the SGL can be adjusted in each operation
> depending on the request.
>
> Signed-off-by: Tomasz Jozwiak <tomaszx.jozw...@intel.com>
> ---
>   config/common_base                   |  1 -
>   doc/guides/compressdevs/qat_comp.rst |  1 -
>   doc/guides/cryptodevs/qat.rst        |  5 ----
>   drivers/compress/qat/qat_comp.c      | 58 ++++++++++++++++++++++++++++++++----
>   drivers/compress/qat/qat_comp.h      | 13 ++++----
>   drivers/compress/qat/qat_comp_pmd.c  | 49 +++++++++++++++++++++++++-----
>   6 files changed, 101 insertions(+), 26 deletions(-)
>
> diff --git a/config/common_base b/config/common_base
> index 0b09a93..91c7b73 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -549,7 +549,6 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
>   # Max. number of QuickAssist devices, which can be detected and attached
>   #
>   CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
> -CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
>   CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
>   
>   #
> diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
> index 5631cb1..6f583a4 100644
> --- a/doc/guides/compressdevs/qat_comp.rst
> +++ b/doc/guides/compressdevs/qat_comp.rst
> @@ -35,7 +35,6 @@ Limitations
>   * Compressdev level 0, no compression, is not supported.
>   * Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
>   * No BSD support as BSD QAT kernel driver not available.
> -* Number of segments in mbuf chains in the op must be <= RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS from the config file.
>   * When using Deflate dynamic huffman encoding for compression, the input size (op.src.length)
>     must be < CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE from the config file,
>     see :ref:`building_qat_config` for more details.
> diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
> index b7eace1..03bd0c1 100644
> --- a/doc/guides/cryptodevs/qat.rst
> +++ b/doc/guides/cryptodevs/qat.rst
> @@ -156,7 +156,6 @@ These are the build configuration options affecting QAT, and their default value
>       CONFIG_RTE_LIBRTE_PMD_QAT=y
>       CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
>       CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
> -     CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
>       CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
>   
>   CONFIG_RTE_LIBRTE_PMD_QAT must be enabled for any QAT PMD to be built.
> @@ -174,10 +173,6 @@ Note, there are separate config items for max cryptodevs CONFIG_RTE_CRYPTO_MAX_D
>   and max compressdevs CONFIG_RTE_COMPRESS_MAX_DEVS, if necessary these should be
>   adjusted to handle the total of QAT and other devices which the process will use.
>   
> -QAT allocates internal structures to handle SGLs. For the compression service
> -CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS can be changed if more segments are needed.
> -An extra (max_inflight_ops x 16) bytes per queue_pair will be used for every increment.
> -
>   QAT compression PMD needs intermediate buffers to support Deflate compression
>   with Dynamic Huffman encoding. CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE
>   specifies the size of a single buffer, the PMD will allocate a multiple of these,
> diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
> index 32ca753..c021f4a 100644
> --- a/drivers/compress/qat/qat_comp.c
> +++ b/drivers/compress/qat/qat_comp.c
> @@ -1,5 +1,5 @@
>   /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2018 Intel Corporation
> + * Copyright(c) 2018-2019 Intel Corporation
>    */
>   
>   #include <rte_mempool.h>
> @@ -55,22 +55,70 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
>               ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
>                               QAT_COMN_PTR_TYPE_SGL);
>   
> +             if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
> +                     /* we need to allocate more elements in SGL*/
> +                     void *tmp;
> +
> +                     tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
> +                                       sizeof(struct qat_sgl) +
> +                                       sizeof(struct qat_flat_buf) *
> +                                       op->m_src->nb_segs, 64,
> +                                       rte_socket_id());
> +
> +                     if (unlikely(tmp == NULL)) {
> +                             QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
> +                                        " for %d elements of SGL",
> +                                        op->m_src->nb_segs);
> +                             op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
> +                             return -ENOMEM;
> +                     }
> +                     /* new SGL is valid now */
> +                     cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
> +                     cookie->src_nb_elems = op->m_src->nb_segs;
> +                     cookie->qat_sgl_src_phys_addr =
> +                             rte_malloc_virt2iova(cookie->qat_sgl_src_d);
> +             }
> +
>               ret = qat_sgl_fill_array(op->m_src,
>                               op->src.offset,
> -                             &cookie->qat_sgl_src,
> +                             cookie->qat_sgl_src_d,
>                               op->src.length,
> -                             RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
> +                             (const uint16_t)cookie->src_nb_elems);
>               if (ret) {
>                       QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
>                       op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
>                       return ret;
>               }
>   
> +             if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
> +                     /* we need to allocate more elements in SGL*/
> +                     struct qat_sgl *tmp;
> +
> +                     tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
> +                                       sizeof(struct qat_sgl) +
> +                                       sizeof(struct qat_flat_buf) *
> +                                       op->m_dst->nb_segs, 64,
> +                                       rte_socket_id());
> +
> +                     if (unlikely(tmp == NULL)) {
> +                             QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
> +                                        " for %d elements of SGL",
> +                                        op->m_dst->nb_segs);
> +                             op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
> +                             return -EINVAL;
> +                     }
> +                     /* new SGL is valid now */
> +                     cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
> +                     cookie->dst_nb_elems = op->m_dst->nb_segs;
> +                     cookie->qat_sgl_dst_phys_addr =
> +                             rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
> +             }
> +
>               ret = qat_sgl_fill_array(op->m_dst,
>                               op->dst.offset,
> -                             &cookie->qat_sgl_dst,
> +                             cookie->qat_sgl_dst_d,
>                               comp_req->comp_pars.out_buffer_sz,
> -                             RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
> +                             (const uint16_t)cookie->dst_nb_elems);
>               if (ret) {
>                       QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
>                       op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
> diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
> index 19f48df..2465f12 100644
> --- a/drivers/compress/qat/qat_comp.h
> +++ b/drivers/compress/qat/qat_comp.h
> @@ -1,5 +1,5 @@
>   /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2015-2018 Intel Corporation
> + * Copyright(c) 2015-2019 Intel Corporation
>    */
>   
>   #ifndef _QAT_COMP_H_
> @@ -37,16 +37,15 @@ struct qat_inter_sgl {
>       struct qat_flat_buf buffers[QAT_NUM_BUFS_IN_IM_SGL];
>   } __rte_packed __rte_cache_aligned;
>   
> -struct qat_comp_sgl {
> -     qat_sgl_hdr;
> -     struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
> -} __rte_packed __rte_cache_aligned;
>   
>   struct qat_comp_op_cookie {
> -     struct qat_comp_sgl qat_sgl_src;
> -     struct qat_comp_sgl qat_sgl_dst;
>       phys_addr_t qat_sgl_src_phys_addr;
>       phys_addr_t qat_sgl_dst_phys_addr;
> +     /* dynamically created SGLs */
> +     uint16_t src_nb_elems;
> +     uint16_t dst_nb_elems;
> +     struct qat_sgl *qat_sgl_src_d;
> +     struct qat_sgl *qat_sgl_dst_d;
>   };
>   
>   struct qat_comp_xform {
> diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
> index 27c8856..f034a19 100644
> --- a/drivers/compress/qat/qat_comp_pmd.c
> +++ b/drivers/compress/qat/qat_comp_pmd.c
> @@ -1,10 +1,14 @@
>   /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2015-2018 Intel Corporation
> + * Copyright(c) 2015-2019 Intel Corporation
>    */
>   
> +#include <rte_malloc.h>
> +
>   #include "qat_comp.h"
>   #include "qat_comp_pmd.h"
>   
> +#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
> +
>   static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
>       {/* COMPRESSION - deflate */
>        .algo = RTE_COMP_ALGO_DEFLATE,
> @@ -60,6 +64,10 @@ static int
>   qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
>   {
>       struct qat_comp_dev_private *qat_private = dev->data->dev_private;
> +     struct qat_qp **qp_addr =
> +             (struct qat_qp **)&(dev->data->queue_pairs[queue_pair_id]);
> +     struct qat_qp *qp = (struct qat_qp *)*qp_addr;
> +     uint32_t i;
>   
>       QAT_LOG(DEBUG, "Release comp qp %u on device %d",
>                               queue_pair_id, dev->data->dev_id);
> @@ -67,6 +75,14 @@ qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
>       qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
>                                               = NULL;
>   
> +     for (i = 0; i < qp->nb_descriptors; i++) {
> +
> +             struct qat_comp_op_cookie *cookie = qp->op_cookies[i];
> +
> +             rte_free(cookie->qat_sgl_src_d);
> +             rte_free(cookie->qat_sgl_dst_d);
> +     }
> +
>       return qat_qp_release((struct qat_qp **)
>                       &(dev->data->queue_pairs[queue_pair_id]));
>   }
> @@ -122,15 +138,34 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
>               struct qat_comp_op_cookie *cookie =
>                               qp->op_cookies[i];
>   
> +             cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
> +                                     sizeof(struct qat_sgl) +
> +                                     sizeof(struct qat_flat_buf) *
> +                                     QAT_PMD_COMP_SGL_DEF_SEGMENTS,
> +                                     64, socket_id);
> +
> +             cookie->qat_sgl_dst_d = rte_zmalloc_socket(NULL,
> +                                     sizeof(struct qat_sgl) +
> +                                     sizeof(struct qat_flat_buf) *
> +                                     QAT_PMD_COMP_SGL_DEF_SEGMENTS,
> +                                     64, socket_id);
> +
> +             if (cookie->qat_sgl_src_d == NULL ||
> +                             cookie->qat_sgl_dst_d == NULL) {
> +                     QAT_LOG(ERR, "Can't allocate SGL"
> +                                  " for device %s",
> +                                  qat_private->qat_dev->name);
> +                     return -ENOMEM;
> +             }
> +
>               cookie->qat_sgl_src_phys_addr =
> -                             rte_mempool_virt2iova(cookie) +
> -                             offsetof(struct qat_comp_op_cookie,
> -                             qat_sgl_src);
> +                             rte_malloc_virt2iova(cookie->qat_sgl_src_d);
>   
>               cookie->qat_sgl_dst_phys_addr =
> -                             rte_mempool_virt2iova(cookie) +
> -                             offsetof(struct qat_comp_op_cookie,
> -                             qat_sgl_dst);
> +                             rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
> +
> +             cookie->dst_nb_elems = cookie->src_nb_elems =
> +                             QAT_PMD_COMP_SGL_DEF_SEGMENTS;
>       }
>   
>       return ret;
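
Side note for anyone skimming the thread: the core of the change is the grow-on-demand SGL kept in each op cookie. Below is a minimal, self-contained sketch of that pattern. The my_sgl/my_cookie structs and the cookie_sgl_reserve() helper are hypothetical stand-ins, not the PMD's real types; only the rte_realloc_socket()/rte_malloc_virt2iova() usage mirrors what the patch does.

#include <stdint.h>
#include <errno.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>

/* Hypothetical stand-ins for the QAT flat-buffer/SGL/cookie structures. */
struct my_flat_buf {
        uint32_t len;
        rte_iova_t addr;
};

struct my_sgl {
        uint16_t num_bufs;
        struct my_flat_buf buffers[]; /* sized at allocation time */
};

struct my_cookie {
        struct my_sgl *sgl;   /* virtual address used by the PMD */
        rte_iova_t sgl_iova;  /* IOVA programmed into the request */
        uint16_t nb_elems;    /* current capacity, in flat buffers */
};

/* Grow the cookie's SGL when an op carries more mbuf segments than the
 * descriptor currently holds; otherwise leave it untouched. */
static int
cookie_sgl_reserve(struct my_cookie *c, uint16_t needed, int socket)
{
        void *tmp;

        if (likely(needed <= c->nb_elems))
                return 0;

        /* rte_realloc_socket() may move the block, so the IOVA has to be
         * refreshed after a successful reallocation. */
        tmp = rte_realloc_socket(c->sgl,
                        sizeof(struct my_sgl) +
                        sizeof(struct my_flat_buf) * needed,
                        64, socket);
        if (unlikely(tmp == NULL))
                return -ENOMEM;

        c->sgl = tmp;
        c->nb_elems = needed;
        c->sgl_iova = rte_malloc_virt2iova(c->sgl);
        return 0;
}

The qp_setup/qp_release hunks then just pair an initial rte_zmalloc_socket() of QAT_PMD_COMP_SGL_DEF_SEGMENTS elements per cookie with a matching rte_free() on release.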
