+{
+ char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
+ struct rte_rcu_qsbr_dq_parameters params = {0};
+
+ if ((lpm == NULL) || (cfg == NULL)) {
+ rte_errno = EINVAL;
+ return 1;
+ }
+
+ if (lpm->v) {
+ rte_errno = EEXIST;
+ return 1;
+ }
+
+ if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
+ /* No other things to do. */
+ } else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
+ /* Init QSBR defer queue. */
+ snprintf(rcu_dq_name, sizeof(rcu_dq_name),
+ "LPM_RCU_%s", lpm->name);
+ params.name = rcu_dq_name;
+ params.size = cfg->dq_size;
+ if (params.size == 0)
+ params.size = lpm->number_tbl8s;
+ params.trigger_reclaim_limit = cfg->reclaim_thd;
+ if (params.trigger_reclaim_limit == 0)
+ params.trigger_reclaim_limit =
+ RTE_LPM_RCU_DQ_RECLAIM_THD;
+ params.max_reclaim_size = cfg->reclaim_max;
+ if (params.max_reclaim_size == 0)
+ params.max_reclaim_size =
+ RTE_LPM_RCU_DQ_RECLAIM_MAX;
+ params.esize = sizeof(uint32_t); /* tbl8 group index */
+ params.free_fn = __lpm_rcu_qsbr_free_resource;
+ params.p = lpm->tbl8;
+ params.v = cfg->v;
+ lpm->dq = rte_rcu_qsbr_dq_create(&params);
+ if (lpm->dq == NULL) {
+ RTE_LOG(ERR, LPM,
+ "LPM QS defer queue creation
failed\n");
+ return 1;
+ }
+ if (dq)
+ *dq = lpm->dq;
+ } else {
+ rte_errno = EINVAL;
+ return 1;
+ }
+ lpm->rcu_mode = cfg->mode;
+ lpm->v = cfg->v;
+
+ return 0;
+}
+
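For reference, a control-plane sequence that uses this API could look like the sketch below. This is illustrative only and not part of the patch; the thread count, allocator, and error handling are assumptions, and the helper name is made up. It needs rte_lpm.h, rte_rcu_qsbr.h and rte_malloc.h.

    /* Illustrative: create a QSBR variable and attach it to an LPM object. */
    static struct rte_rcu_qsbr *
    lpm_attach_rcu(struct rte_lpm *lpm, uint32_t max_threads)
    {
        struct rte_lpm_rcu_config cfg = {0};
        struct rte_rcu_qsbr *v;
        size_t sz;

        /* Allocate and initialize the QSBR variable for max_threads readers. */
        sz = rte_rcu_qsbr_get_memsize(max_threads);
        v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
        if (v == NULL)
            return NULL;
        rte_rcu_qsbr_init(v, max_threads);

        /* Default mode: defer queue with default size and thresholds. */
        cfg.v = v;
        cfg.mode = RTE_LPM_QSBR_MODE_DQ;
        if (rte_lpm_rcu_qsbr_add(lpm, &cfg, NULL) != 0) {
            rte_free(v);
            return NULL;
        }

        return v;
    }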
/*
* Adds a rule to the rule table.
*
@@ -394,14 +468,15 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
* Find, clean and allocate a tbl8.
*/
static int32_t
-tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
+_tbl8_alloc(struct rte_lpm *lpm)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
- tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+ for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
+ tbl8_entry = &lpm->tbl8[group_idx *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
@@ -427,14 +502,40 @@ tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
return -ENOSPC;
}
+static int32_t
+tbl8_alloc(struct rte_lpm *lpm)
+{
+ int32_t group_idx; /* tbl8 group index. */
+
+ group_idx = _tbl8_alloc(lpm);
+ if ((group_idx < 0) && (lpm->dq != NULL)) {
+ /* If there are no tbl8 groups try to reclaim one. */
+ if (rte_rcu_qsbr_dq_reclaim(lpm->dq, 1, NULL, NULL, NULL) == 0)
+ group_idx = _tbl8_alloc(lpm);
+ }
+
+ return group_idx;
+}
+
static void
-tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
- /* Set tbl8 group invalid*/
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
- __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
- __ATOMIC_RELAXED);
+ if (!lpm->v) {
+ /* Set tbl8 group invalid*/
+ __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
+ } else if (lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
+ /* Wait for quiescent state change. */
+ rte_rcu_qsbr_synchronize(lpm->v, RTE_QSBR_THRID_INVALID);
+ /* Set tbl8 group invalid*/
+ __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
+ } else if (lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
+ /* Push into QSBR defer queue. */
+ rte_rcu_qsbr_dq_enqueue(lpm->dq, (void *)&tbl8_group_start);
+ }
}
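The defer-queue branch above relies on the free callback registered in rte_lpm_rcu_qsbr_add() (params.free_fn = __lpm_rcu_qsbr_free_resource), which is defined earlier in the patch and not shown in this excerpt. The sketch below (illustrative, with a placeholder name) shows what such a callback has to do, assuming the rte_rcu_qsbr_dq free-callback signature and that params.p is the tbl8 array as set above:

    /* Placeholder sketch: invalidate reclaimed tbl8 group(s). 'p' is the
     * pointer registered in params.p (the tbl8 array) and 'e' points at
     * 'n' uint32_t group-start indices enqueued by tbl8_free().
     */
    static void
    lpm_free_tbl8_group(void *p, void *e, unsigned int n)
    {
        struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
        struct rte_lpm_tbl_entry *tbl8 = p;
        uint32_t *group_start = e;
        unsigned int i;

        for (i = 0; i < n; i++)
            /* Set tbl8 group invalid, matching the non-RCU path. */
            __atomic_store(&tbl8[group_start[i]], &zero_tbl8_entry,
                    __ATOMIC_RELAXED);
    }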
static __rte_noinline int32_t
@@ -523,7 +624,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
if (!lpm->tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+ tbl8_group_index = tbl8_alloc(lpm);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
@@ -569,7 +670,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+ tbl8_group_index = tbl8_alloc(lpm);
if (tbl8_group_index < 0) {
return tbl8_group_index;
@@ -977,7 +1078,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
*/
lpm->tbl24[tbl24_index].valid = 0;
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm->tbl8, tbl8_group_start);
+ tbl8_free(lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -993,7 +1094,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm->tbl8, tbl8_group_start);
+ tbl8_free(lpm, tbl8_group_start);
}
#undef group_idx
return 0;
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index b9d49ac87..8c054509a 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
*/
#ifndef _RTE_LPM_H_
@@ -20,6 +21,7 @@
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
+#include <rte_rcu_qsbr.h>
#ifdef __cplusplus
extern "C" {
@@ -62,6 +64,17 @@ extern "C" {
/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
+/** @internal Default threshold to trigger RCU defer queue reclamation. */
+#define RTE_LPM_RCU_DQ_RECLAIM_THD 32
+
+/** @internal Default RCU defer queue entries to reclaim in one go. */
+#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
+
+/* Create defer queue for reclaim. */
+#define RTE_LPM_QSBR_MODE_DQ 0
+/* Use blocking mode reclaim. No defer queue created. */
+#define RTE_LPM_QSBR_MODE_SYNC 0x01
+
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
@@ -130,6 +143,28 @@ struct rte_lpm {
__rte_cache_aligned; /**< LPM tbl24 table. */
struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
+
+ /* RCU config. */
+ struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
+ uint32_t rcu_mode; /* Blocking, defer queue. */
+ struct rte_rcu_qsbr_dq *dq; /* RCU QSBR defer queue. */
+};
+
+/** LPM RCU QSBR configuration structure. */
+struct rte_lpm_rcu_config {
+ struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
+ /* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
+ * '0' for default: create defer queue for reclaim.
+ */
+ uint32_t mode;
+ /* RCU defer queue size. default: lpm->number_tbl8s. */
+ uint32_t dq_size;
+ uint32_t reclaim_thd; /* Threshold to trigger auto reclaim.
+ * default: RTE_LPM_RCU_DQ_RECLAIM_THD.
+ */
+ uint32_t reclaim_max; /* Max entries to reclaim in one go.
+ * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
+ */
};
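As a usage note (illustrative, not from the patch): a zero-initialized config with only 'v' set selects the defer-queue mode with all defaults. The fragment below shows explicit overrides; 'qsbr_var' stands for a previously initialized struct rte_rcu_qsbr pointer and the numbers are arbitrary examples.

    struct rte_lpm_rcu_config cfg = {
        .v = qsbr_var,                /* QSBR variable shared with readers */
        .mode = RTE_LPM_QSBR_MODE_DQ, /* reclaim through a defer queue */
        .dq_size = 1024,              /* defer queue entries (default: number_tbl8s) */
        .reclaim_thd = 64,            /* start reclaiming once 64 entries are queued */
        .reclaim_max = 32,            /* reclaim at most 32 entries per pass */
    };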
/**
@@ -179,6 +214,30 @@ rte_lpm_find_existing(const char *name);
void
rte_lpm_free(struct rte_lpm *lpm);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Associate RCU QSBR variable with an LPM object.
+ *
+ * @param lpm
+ * the LPM object to attach the RCU QSBR variable to
+ * @param cfg
+ * RCU QSBR configuration
+ * @param dq
+ * handle of the created RCU QSBR defer queue
+ * @return
+ * On success - 0
+ * On error - 1 with error code set in rte_errno.
+ * Possible rte_errno codes are:
+ * - EINVAL - invalid pointer
+ * - EEXIST - already added QSBR
+ * - ENOMEM - memory allocation failure
+ */
+__rte_experimental
+int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
+ struct rte_rcu_qsbr_dq **dq);
+
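On the reader (data-plane) side nothing LPM-specific is needed; lookup threads only have to follow the usual QSBR reader flow against the same variable so that deleted tbl8 groups can be reclaimed. A sketch under that assumption (thread_id handling and the loop are illustrative):

    /* Illustrative reader loop: report quiescent state between lookups. */
    static void
    lpm_reader_loop(struct rte_lpm *lpm, struct rte_rcu_qsbr *v,
            unsigned int thread_id, const uint32_t *ips, unsigned int n)
    {
        uint32_t next_hop;
        unsigned int i;

        rte_rcu_qsbr_thread_register(v, thread_id);
        rte_rcu_qsbr_thread_online(v, thread_id);

        for (i = 0; i < n; i++) {
            if (rte_lpm_lookup(lpm, ips[i], &next_hop) == 0) {
                /* Use next_hop while still inside the read-side section. */
            }
            /* This thread no longer holds references to LPM internals. */
            rte_rcu_qsbr_quiescent(v, thread_id);
        }

        rte_rcu_qsbr_thread_offline(v, thread_id);
        rte_rcu_qsbr_thread_unregister(v, thread_id);
    }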
/**
* Add a rule to the LPM table.
*
diff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map
index 500f58b80..bfccd7eac 100644
--- a/lib/librte_lpm/rte_lpm_version.map
+++ b/lib/librte_lpm/rte_lpm_version.map
@@ -21,3 +21,9 @@ DPDK_20.0 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ rte_lpm_rcu_qsbr_add;
+};
--
2.17.1