Because latch readers have a strict data dependency on the seq load,
we can avoid the smp_rmb() in favour of a read_barrier_depends(), i.e.
use lockless_dereference().
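
For reference, a rough userspace sketch of the latch read/write pattern
after this change, using C11 atomics as a stand-in for the kernel
primitives (memory_order_consume for lockless_dereference(), a release
increment for the smp_wmb() + sequence++ of raw_write_seqcount_latch());
the struct and function names below are made up purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct latch_struct {
	atomic_uint seq;	/* even/odd selects which copy readers use */
	long data[2];		/* two copies of the protected data */
};

/* Writer: redirect readers to the other copy, then update the idle one. */
static void latch_modify(struct latch_struct *latch, long val)
{
	/* analogue of raw_write_seqcount_latch(): smp_wmb(); seq++ */
	atomic_fetch_add_explicit(&latch->seq, 1, memory_order_release);
	latch->data[0] = val;		/* readers are now on data[1] */

	atomic_fetch_add_explicit(&latch->seq, 1, memory_order_release);
	latch->data[1] = val;		/* readers are now on data[0] */
}

/* Reader: the loop the updated seqlock.h comment describes. */
static long latch_query(struct latch_struct *latch)
{
	unsigned int seq, idx;
	long entry;

	do {
		/*
		 * Data-dependent load; stands in for
		 * raw_read_seqcount_latch()/lockless_dereference(), so
		 * no smp_rmb() is needed before using seq below.
		 */
		seq = atomic_load_explicit(&latch->seq,
					   memory_order_consume);

		idx = seq & 0x01;
		entry = latch->data[idx];

		/* stands in for read_seqcount_retry(): rmb + recheck */
		atomic_thread_fence(memory_order_acquire);
	} while (seq != atomic_load_explicit(&latch->seq,
					     memory_order_relaxed));

	return entry;
}

/* Single-threaded demo; a real user runs reader and writer concurrently. */
int main(void)
{
	struct latch_struct latch = { .seq = 0, .data = { 0, 0 } };

	latch_modify(&latch, 42);
	printf("%ld\n", latch_query(&latch));
	return 0;
}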

Suggested-by: Ingo Molnar <mi...@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 include/linux/seqlock.h   |    9 +++++++--
 kernel/time/timekeeping.c |    2 +-
 2 files changed, 8 insertions(+), 3 deletions(-)

--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
+#include <linux/compiler.h>
 #include <asm/processor.h>
 
 /*
@@ -233,6 +234,11 @@ static inline void raw_write_seqcount_en
        s->sequence++;
 }
 
+static inline int raw_read_seqcount_latch(seqcount_t *s)
+{
+       return lockless_dereference(s->sequence);
+}
+
 /**
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
@@ -284,8 +290,7 @@ static inline void raw_write_seqcount_en
  *     unsigned seq, idx;
  *
  *     do {
- *             seq = latch->seq;
- *             smp_rmb();
+ *             seq = lockless_dereference(latch->seq);
  *
  *             idx = seq & 0x01;
  *             entry = data_query(latch->data[idx], ...);
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -393,7 +393,7 @@ static __always_inline u64 __ktime_get_f
        u64 now;
 
        do {
-               seq = raw_read_seqcount(&tkf->seq);
+               seq = raw_read_seqcount_latch(&tkf->seq);
                tkr = tkf->base + (seq & 0x01);
                now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
        } while (read_seqcount_retry(&tkf->seq, seq));

