On Tue, Oct 1, 2019 at 6:04 AM Kevin Traynor <ktray...@redhat.com> wrote:

> If rc contains a non-zero return code from bnxt_hwrm_send_message(),
> HWRM_CHECK_RESULT_SILENT() will return.
>
> Just after that code, there is an 'if (!rc) {...} else {...}'.
> Coverity complains that the else branch of this if-else statement is
> dead code, as rc will always be 0 when that code is reached.
>
> 4309        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
>                    BNXT_USE_CHIMP_MB);
>     cond_const: Condition rc, taking false branch.
>     Now the value of rc is equal to 0.
> 4310        HWRM_CHECK_RESULT_SILENT();
> 4311
>     const: At condition rc, the value of rc must be equal to 0.
>     dead_error_condition: The condition !rc must be true.
> 4312        if (!rc) {
>
> [snip]
>
> 4373        } else {
>     CID 343450 (#1 of 1): Logically dead code
>     (DEADCODE)dead_error_line: Execution cannot
>     reach this statement: rc = 0;.
> 4374                rc = 0;
> 4375        }
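
For context, HWRM_CHECK_RESULT_SILENT() returns from the calling function
as soon as it detects an error, so any code after it only ever runs with
rc == 0. A minimal sketch of that effect (assumed shape, not the verbatim
macro from bnxt_hwrm.c, which also releases the HWRM lock and checks the
firmware response's error_code before returning):

    #define HWRM_CHECK_RESULT_SILENT() do { \
            if (rc) \
                    return rc;      /* early exit, so rc == 0 below the macro */ \
    } while (0)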
>
> Coverity issue: 343450
> Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
> Cc: lance.richard...@broadcom.com
> Cc: sta...@dpdk.org
>
> Signed-off-by: Kevin Traynor <ktray...@redhat.com>
>
Acked-by: Ajit Khaparde <ajit.khapa...@broadcom.com>


> ---
>  drivers/net/bnxt/bnxt_hwrm.c | 118 +++++++++++++++++------------------
>  1 file changed, 56 insertions(+), 62 deletions(-)
>
> diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
> index 9883fb506..5378e3e9c 100644
> --- a/drivers/net/bnxt/bnxt_hwrm.c
> +++ b/drivers/net/bnxt/bnxt_hwrm.c
> @@ -4298,5 +4298,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
>         struct hwrm_func_backing_store_qcaps_output *resp =
>                 bp->hwrm_cmd_resp_addr;
> -       int rc;
> +       struct bnxt_ctx_pg_info *ctx_pg;
> +       struct bnxt_ctx_mem_info *ctx;
> +       int rc, total_alloc_len, i;
>
>         if (!BNXT_CHIP_THOR(bp) ||
> @@ -4310,68 +4312,60 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
>         HWRM_CHECK_RESULT_SILENT();
>
> -       if (!rc) {
> -               struct bnxt_ctx_pg_info *ctx_pg;
> -               struct bnxt_ctx_mem_info *ctx;
> -               int total_alloc_len;
> -               int i;
> +       total_alloc_len = sizeof(*ctx);
> +       ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
> +                        RTE_CACHE_LINE_SIZE);
> +       if (!ctx) {
> +               rc = -ENOMEM;
> +               goto ctx_err;
> +       }
> +       memset(ctx, 0, total_alloc_len);
>
> -               total_alloc_len = sizeof(*ctx);
> -               ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len,
> -                                RTE_CACHE_LINE_SIZE);
> -               if (!ctx) {
> -                       rc = -ENOMEM;
> -                       goto ctx_err;
> -               }
> -               memset(ctx, 0, total_alloc_len);
> +       ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
> +                           sizeof(*ctx_pg) * BNXT_MAX_Q,
> +                           RTE_CACHE_LINE_SIZE);
> +       if (!ctx_pg) {
> +               rc = -ENOMEM;
> +               goto ctx_err;
> +       }
> +       for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
> +               ctx->tqm_mem[i] = ctx_pg;
>
> -               ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
> -                                   sizeof(*ctx_pg) * BNXT_MAX_Q,
> -                                   RTE_CACHE_LINE_SIZE);
> -               if (!ctx_pg) {
> -                       rc = -ENOMEM;
> -                       goto ctx_err;
> -               }
> -               for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++)
> -                       ctx->tqm_mem[i] = ctx_pg;
> +       bp->ctx = ctx;
> +       ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
> +       ctx->qp_min_qp1_entries =
> +               rte_le_to_cpu_16(resp->qp_min_qp1_entries);
> +       ctx->qp_max_l2_entries =
> +               rte_le_to_cpu_16(resp->qp_max_l2_entries);
> +       ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
> +       ctx->srq_max_l2_entries =
> +               rte_le_to_cpu_16(resp->srq_max_l2_entries);
> +       ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
> +       ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
> +       ctx->cq_max_l2_entries =
> +               rte_le_to_cpu_16(resp->cq_max_l2_entries);
> +       ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
> +       ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
> +       ctx->vnic_max_vnic_entries =
> +               rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
> +       ctx->vnic_max_ring_table_entries =
> +               rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
> +       ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
> +       ctx->stat_max_entries =
> +               rte_le_to_cpu_32(resp->stat_max_entries);
> +       ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
> +       ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
> +       ctx->tqm_min_entries_per_ring =
> +               rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
> +       ctx->tqm_max_entries_per_ring =
> +               rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
> +       ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
> +       if (!ctx->tqm_entries_multiple)
> +               ctx->tqm_entries_multiple = 1;
> +       ctx->mrav_max_entries =
> +               rte_le_to_cpu_32(resp->mrav_max_entries);
> +       ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
> +       ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
> +       ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
>
> -               bp->ctx = ctx;
> -               ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
> -               ctx->qp_min_qp1_entries =
> -                       rte_le_to_cpu_16(resp->qp_min_qp1_entries);
> -               ctx->qp_max_l2_entries =
> -                       rte_le_to_cpu_16(resp->qp_max_l2_entries);
> -               ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
> -               ctx->srq_max_l2_entries =
> -                       rte_le_to_cpu_16(resp->srq_max_l2_entries);
> -               ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
> -               ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
> -               ctx->cq_max_l2_entries =
> -                       rte_le_to_cpu_16(resp->cq_max_l2_entries);
> -               ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
> -               ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
> -               ctx->vnic_max_vnic_entries =
> -                       rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
> -               ctx->vnic_max_ring_table_entries =
> -                       rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
> -               ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
> -               ctx->stat_max_entries =
> -                       rte_le_to_cpu_32(resp->stat_max_entries);
> -               ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
> -               ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
> -               ctx->tqm_min_entries_per_ring =
> -                       rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
> -               ctx->tqm_max_entries_per_ring =
> -                       rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
> -               ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
> -               if (!ctx->tqm_entries_multiple)
> -                       ctx->tqm_entries_multiple = 1;
> -               ctx->mrav_max_entries =
> -                       rte_le_to_cpu_32(resp->mrav_max_entries);
> -               ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
> -               ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
> -               ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
> -       } else {
> -               rc = 0;
> -       }
>  ctx_err:
>         HWRM_UNLOCK();
> --
> 2.21.0
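
With the dead else removed, the error paths left in this function are the
early return inside HWRM_CHECK_RESULT_SILENT() and the -ENOMEM gotos; the
success path simply falls through to the unlock. A condensed sketch of the
post-patch flow (assuming the function still ends with HWRM_UNLOCK() and
return rc, as the context lines above suggest):

    int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
    {
            ...
            rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
            HWRM_CHECK_RESULT_SILENT();     /* returns on error, rc == 0 below */

            ctx = rte_malloc("bnxt_ctx_mem", total_alloc_len, RTE_CACHE_LINE_SIZE);
            if (!ctx) {
                    rc = -ENOMEM;           /* remaining non-zero rc paths */
                    goto ctx_err;
            }
            ...                             /* copy firmware limits into bp->ctx */
    ctx_err:
            HWRM_UNLOCK();
            return rc;
    }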