On 2022-05-06 03:26, fengchengwen wrote:
On 2022/5/1 22:03, Mattias Rönnblom wrote:
A sequence lock (seqlock) is a synchronization primitive which allows
for data-race-free, low-overhead, high-frequency reads, especially for
data structures shared across many cores and which are updated
relatively infrequently.
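To illustrate the intended usage, here is a minimal read-side sketch. It assumes
(as the rte_seqlock_read_retry() prototype further down implies) that
rte_seqlock_read_begin() returns the current sequence number; the struct and
function names are only illustrative:

#include <rte_seqlock.h>

struct shared_data {
	uint64_t a;
	uint64_t b;
};

static rte_seqlock_t lock = RTE_SEQLOCK_INITIALIZER;
static struct shared_data shared;

static void
read_shared(struct shared_data *copy)
{
	uint32_t sn;

	do {
		sn = rte_seqlock_read_begin(&lock);
		/* The loads may observe a torn update; read_retry() detects that. */
		*copy = shared;
	} while (rte_seqlock_read_retry(&lock, sn));
}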


...

+}
+
+static void
+reader_stop(struct reader *reader)
+{
+       __atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
+}
+
+#define NUM_WRITERS (2) /* main lcore + one worker */
+#define MIN_NUM_READERS (2)
+#define MAX_READERS (RTE_MAX_LCORE - NUM_WRITERS - 1)

Why minus 1?
Suggest defining MAX_READERS as RTE_MAX_LCORE to avoid underflow on small-sized VMs.


OK.
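A sketch of the revised definition along those lines (the rte_lcore_count()
check below still bounds how many reader slots are actually used at run time):

#define MAX_READERS RTE_MAX_LCORE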

+#define MIN_LCORE_COUNT (NUM_WRITERS + MIN_NUM_READERS)
+
+/* Only a compile-time test */
+static rte_seqlock_t __rte_unused static_init_lock = RTE_SEQLOCK_INITIALIZER;
+
+static int
+test_seqlock(void)
+{
+       struct reader readers[MAX_READERS];
+       unsigned int num_readers;
+       unsigned int num_lcores;
+       unsigned int i;
+       unsigned int lcore_id;
+       unsigned int reader_lcore_ids[MAX_READERS];
+       unsigned int worker_writer_lcore_id = 0;
+       int rc = TEST_SUCCESS;
+
+       num_lcores = rte_lcore_count();
+
+       if (num_lcores < MIN_LCORE_COUNT) {
+               printf("Too few cores to run test. Skipping.\n");
+               return TEST_SKIPPED;
+       }
+
+       num_readers = num_lcores - NUM_WRITERS;
+
+       struct data *data = rte_zmalloc(NULL, sizeof(struct data), 0);

Please check whether the value of data is NULL.


OK.
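A minimal sketch of the added check (rte_zmalloc() returns NULL on allocation
failure, so the test should bail out in that case):

	struct data *data = rte_zmalloc(NULL, sizeof(struct data), 0);

	if (data == NULL) {
		printf("Failed to allocate memory for seqlock test data\n");
		return TEST_FAILED;
	}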

+
+       i = 0;
+       RTE_LCORE_FOREACH_WORKER(lcore_id) {
+               if (i == 0) {
+                       rte_eal_remote_launch(writer_run, data, lcore_id);
+                       worker_writer_lcore_id = lcore_id;
+               } else {
+                       unsigned int reader_idx = i - 1;
+                       struct reader *reader = &readers[reader_idx];
+
+                       reader->data = data;
+                       reader->stop = 0;
+
+                       rte_eal_remote_launch(reader_run, reader, lcore_id);
+                       reader_lcore_ids[reader_idx] = lcore_id;
+               }
+               i++;
+       }
+
+       if (writer_run(data) != 0 ||
+           rte_eal_wait_lcore(worker_writer_lcore_id) != 0)
+               rc = TEST_FAILED;
+
+       for (i = 0; i < num_readers; i++) {
+               reader_stop(&readers[i]);
+               if (rte_eal_wait_lcore(reader_lcore_ids[i]) != 0)
+                       rc = TEST_FAILED;
+       }
+

Please free data memory.


OK.
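A sketch of the corresponding cleanup, i.e., an rte_free() call placed just
before the final return:

	rte_free(data);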

+       return rc;
+}
+
+REGISTER_TEST_COMMAND(seqlock_autotest, test_seqlock);
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 4245b9635c..f23e33ae30 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -77,6 +77,7 @@ The public API headers are grouped by topics:
    [rwlock]             (@ref rte_rwlock.h),
    [spinlock]           (@ref rte_spinlock.h),
    [ticketlock]         (@ref rte_ticketlock.h),
+  [seqlock]            (@ref rte_seqlock.h),
    [RCU]                (@ref rte_rcu_qsbr.h)

...

+ */
+__rte_experimental
+static inline bool
+rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
+{
+       uint32_t end_sn;
+
+       /* An odd sequence number means the protected data was being
+        * modified already at the point of the rte_seqlock_read_begin()
+        * call.
+        */
+       if (unlikely(begin_sn & 1))
+               return true;
+
+       /* make sure the data loads happen before the sn load */
+       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

On ARMv8, rte_atomic_thread_fence(__ATOMIC_ACQUIRE) and rte_smp_rmb() both
output 'dmb ishld'.
Suggest using rte_smp_rmb(); please see the comment below.
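A sketch of what the proposed change would look like in place of the acquire
fence (only the reviewer's suggestion, not necessarily the final code):

	/* make sure the data loads happen before the sn load */
	rte_smp_rmb();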

+
+       end_sn = __atomic_load_n(&seqlock->sn, __ATOMIC_RELAXED);
+
+       /* A writer incremented the sequence number during this read
+        * critical section.
+        */
+       if (unlikely(begin_sn != end_sn))
+               return true;
+
+       return false;
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Begin a write-side critical section.
+ *
+ * A call to this function acquires the write lock associated with
+ * @p seqlock, and marks the beginning of a write-side critical section.
+ *
+ * After having called this function, the caller may go on to modify
+ * (both read and write) the protected data, in an atomic or
+ * non-atomic manner.
+ *
+ * After the necessary updates have been performed, the application
+ * calls rte_seqlock_write_unlock().
+ *
+ * This function is not preemption-safe in the sense that preemption
+ * of the calling thread may block reader progress until the writer
+ * thread is rescheduled.
+ *
+ * Unlike rte_seqlock_read_begin(), each call made to
+ * rte_seqlock_write_lock() must be matched with an unlock call.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ *
+ * @see rte_seqlock_write_unlock()
+ */
+__rte_experimental
+static inline void
+rte_seqlock_write_lock(rte_seqlock_t *seqlock)
+{
+       uint32_t sn;
+
+       /* to synchronize with other writers */
+       rte_spinlock_lock(&seqlock->lock);
+
+       sn = seqlock->sn + 1;
+
+       __atomic_store_n(&seqlock->sn, sn, __ATOMIC_RELAXED);
+
+       /* __ATOMIC_RELEASE to prevent later stores (in program order)
+        * from being reordered before the sn store.
+        */
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);

On ARMv8, rte_atomic_thread_fence(__ATOMIC_RELEASE) will output 'dmb ish', and
rte_smp_wmb() will output 'dmb ishst'.
Suggest using rte_smp_wmb(); I think only a store barrier is needed here.


(This has already been discussed further down in the mail thread, and I have nothing to add.)

+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * End a write-side critical section.
+ *
+ * A call to this function marks the end of the write-side critical
+ * section, for @p seqlock. After this call has been made, the protected
+ * data may no longer be modified.
+ *
+ * @param seqlock
+ *   A pointer to the seqlock.
+ *
+ * @see rte_seqlock_write_lock()
+ */
+__rte_experimental
+static inline void
+rte_seqlock_write_unlock(rte_seqlock_t *seqlock)
+{
+       uint32_t sn;
+
+       sn = seqlock->sn + 1;
+
+       /* synchronizes-with the load acquire in rte_seqlock_read_begin() */
+       __atomic_store_n(&seqlock->sn, sn, __ATOMIC_RELEASE);
+
+       rte_spinlock_unlock(&seqlock->lock);
+}
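
To complement the API documentation above, a minimal write-side sketch, reusing
the illustrative 'lock' and 'shared' objects from the read-side example earlier:

static void
write_shared(uint64_t a, uint64_t b)
{
	rte_seqlock_write_lock(&lock);
	shared.a = a;
	shared.b = b;
	rte_seqlock_write_unlock(&lock);
}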
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* _RTE_SEQLOCK_H_ */
diff --git a/lib/eal/version.map b/lib/eal/version.map
index b53eeb30d7..4a9d0ed899 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -420,6 +420,9 @@ EXPERIMENTAL {
        rte_intr_instance_free;
        rte_intr_type_get;
        rte_intr_type_set;
+
+       # added in 22.07
+       rte_seqlock_init;
  };
INTERNAL {


Reviewed-by: Chengwen Feng <fengcheng...@huawei.com>


Thanks a lot for the review!
