In cases where some ops failed to enqueue, the op session was never
being reset. This resulted in a segmentation fault the next time the
ops were processed. To fix this, set the op session only after the
failure condition has been checked.
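To illustrate the distributor-side fix, a simplified sketch of the
corrected enqueue ordering follows. This is illustrative only, not the
literal driver code: the loop is shown un-unrolled, setup is elided,
and the queue bound (written here as max_nb_objs) is a stand-in for
the driver's actual limit.

	for (i = 0; i < nb_ops; i++) {
		/* Pick the target worker based on the job length. */
		target = !(job_len & psd_qp_ctx->threshold);
		p_enq_op = &enq_ops[target];

		/*
		 * Check for queue space BEFORE touching the op; if the
		 * queue is full, the op keeps its original session and
		 * can safely be retried on a later call.
		 */
		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
				max_nb_objs)
			break;

		/*
		 * Only now is it safe to swap in the worker session.
		 * Doing this before the check left un-enqueued ops
		 * pointing at a worker session, which faulted on the
		 * next processing pass.
		 */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			ops[i]->sym->session = sess_ctx->worker_sess[target];

		sched_ops[p_enq_op->worker_idx][p_enq_op->pos++] = ops[i];
	}

The failover path applies the same principle: scheduler_retrieve_session()
restores the original sessions on any ops the primary worker could not
take before they are retried on the secondary worker, and again on any
that still fail, so callers never see ops carrying a worker session.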
Also, the wrong ops index was being used for session retrieval when
dequeueing for the secondary worker (see the sketch after the diff).

Fixes: 6812b9bf470e ("crypto/scheduler: use unified session")

Reported-by: Kevin O'Sullivan <kevin.osulli...@intel.com>
Signed-off-by: Ciara Power <ciara.po...@intel.com>
---
 drivers/crypto/scheduler/scheduler_failover.c |  8 ++++-
 .../scheduler/scheduler_pkt_size_distr.c      | 30 +++++++++----------
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 7fadcf66d0..f24d2fc44b 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -50,12 +50,18 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	enqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,
 			ops, nb_ops, PRIMARY_WORKER_IDX);
 
-	if (enqueued_ops < nb_ops)
+	if (enqueued_ops < nb_ops) {
+		scheduler_retrieve_session(&ops[enqueued_ops],
+			nb_ops - enqueued_ops);
 		enqueued_ops += failover_worker_enqueue(
 				&qp_ctx->secondary_worker,
 				&ops[enqueued_ops],
 				nb_ops - enqueued_ops,
 				SECONDARY_WORKER_IDX);
 
+		if (enqueued_ops < nb_ops)
+			scheduler_retrieve_session(&ops[enqueued_ops],
+				nb_ops - enqueued_ops);
+	}
 	return enqueued_ops;
 }
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 41f05e6a47..0c51fff930 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -89,9 +89,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			ops[i]->sym->auth.data.length;
 		/* decide the target op based on the job length */
 		target[0] = !(job_len[0] & psd_qp_ctx->threshold);
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i]->sym->session =
-				sess_ctx[0]->worker_sess[target[0]];
 		p_enq_op = &enq_ops[target[0]];
 
 		/* stop schedule cops before the queue is full, this shall
@@ -103,6 +100,9 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
+		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+			ops[i]->sym->session =
+				sess_ctx[0]->worker_sess[target[0]];
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 
@@ -110,9 +110,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		job_len[1] += (ops[i + 1]->sym->cipher.data.length == 0) *
 				ops[i+1]->sym->auth.data.length;
 		target[1] = !(job_len[1] & psd_qp_ctx->threshold);
-		if (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 1]->sym->session =
-				sess_ctx[1]->worker_sess[target[1]];
 		p_enq_op = &enq_ops[target[1]];
 
 		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
@@ -121,6 +118,9 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
+		if (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+			ops[i + 1]->sym->session =
+				sess_ctx[1]->worker_sess[target[1]];
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
 		p_enq_op->pos++;
 
@@ -128,9 +128,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		job_len[2] += (ops[i + 2]->sym->cipher.data.length == 0) *
 				ops[i + 2]->sym->auth.data.length;
 		target[2] = !(job_len[2] & psd_qp_ctx->threshold);
-		if (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 2]->sym->session =
-				sess_ctx[2]->worker_sess[target[2]];
 		p_enq_op = &enq_ops[target[2]];
 
 		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
@@ -139,6 +136,9 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
+		if (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+			ops[i + 2]->sym->session =
+				sess_ctx[2]->worker_sess[target[2]];
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
 		p_enq_op->pos++;
 
@@ -146,9 +146,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		job_len[3] += (ops[i + 3]->sym->cipher.data.length == 0) *
 				ops[i + 3]->sym->auth.data.length;
 		target[3] = !(job_len[3] & psd_qp_ctx->threshold);
-		if (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i + 3]->sym->session =
-				sess_ctx[3]->worker_sess[target[3]];
 		p_enq_op = &enq_ops[target[3]];
 
 		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
@@ -157,6 +154,9 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
+		if (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+			ops[i + 3]->sym->session =
+				sess_ctx[3]->worker_sess[target[3]];
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
 		p_enq_op->pos++;
 	}
@@ -171,8 +171,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		job_len += (ops[i]->sym->cipher.data.length == 0) *
 				ops[i]->sym->auth.data.length;
 		target = !(job_len & psd_qp_ctx->threshold);
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-			ops[i]->sym->session = sess_ctx->worker_sess[target];
 		p_enq_op = &enq_ops[target];
 
 		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
@@ -181,6 +179,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
+		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+			ops[i]->sym->session = sess_ctx->worker_sess[target];
 		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 	}
@@ -251,7 +251,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
 				worker->qp_id, &ops[nb_deq_ops_pri],
 				nb_ops - nb_deq_ops_pri);
-		scheduler_retrieve_session(ops, nb_deq_ops_sec);
+		scheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
 		worker->nb_inflight_cops -= nb_deq_ops_sec;
 
 		if (!worker->nb_inflight_cops)
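A note on the dequeue fix, for clarity: the secondary worker's burst is
written into ops[] at offset nb_deq_ops_pri, so session retrieval must
start at that same offset. A rough sketch follows; the primary-worker
half is paraphrased from the surrounding code rather than taken from
the diff above, and `primary`/`worker` here are hypothetical handles
for the two workers.

	/* Primary worker fills ops[0 .. nb_deq_ops_pri - 1]. */
	nb_deq_ops_pri = rte_cryptodev_dequeue_burst(primary->dev_id,
			primary->qp_id, ops, nb_ops);
	scheduler_retrieve_session(ops, nb_deq_ops_pri);

	/*
	 * The secondary burst lands at &ops[nb_deq_ops_pri]. Passing
	 * plain `ops` here (the old bug) re-processed the primary ops
	 * and skipped the secondary ones entirely.
	 */
	nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
			worker->qp_id, &ops[nb_deq_ops_pri],
			nb_ops - nb_deq_ops_pri);
	scheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
-- 
2.25.1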