We already have a number of calls converted, but there is still a
decent number of ACCESS_ONCE callers in core code, which is somewhat
troubling. Convert more users (even if some are scalar types); we
want to eventually get rid of ACCESS_ONCE altogether.

Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
---
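Note for reviewers: a compilable userspace sketch of the semantics
involved, for reference (MY_READ_ONCE/MY_WRITE_ONCE are made-up names
for illustration; the real definitions live in include/linux/compiler.h).
The plain volatile cast below is essentially what ACCESS_ONCE() does
and is only suitable for properly aligned scalar types; the kernel's
READ_ONCE()/WRITE_ONCE() additionally handle non-scalar types via a
size-switched copy:

	#include <stdio.h>

	/*
	 * Force exactly one access: the compiler may neither elide nor
	 * duplicate it, and for aligned machine-word-sized scalars it
	 * emits a single, non-torn load/store in practice.
	 */
	#define MY_READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define MY_WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

	static int a;

	int main(void)
	{
		int tmp_a;

		MY_WRITE_ONCE(a, 42);		/* exactly one store */
		tmp_a = MY_READ_ONCE(a);	/* exactly one load, no refetch */
		printf("%d\n", tmp_a);
		return 0;
	}
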
 Documentation/atomic_ops.txt | 18 +++++++++---------
 include/linux/bitops.h       |  2 +-
 include/linux/llist.h        |  2 +-
 kernel/acct.c                |  4 ++--
 kernel/events/core.c         |  6 +++---
 kernel/events/ring_buffer.c  |  2 +-
 kernel/exit.c                |  2 +-
 kernel/kthread.c             |  2 +-
 kernel/task_work.c           |  6 +++---
 kernel/trace/ring_buffer.c   |  2 +-
 kernel/trace/trace.h         |  2 +-
 kernel/trace/trace_stack.c   |  2 +-
 kernel/user_namespace.c      |  2 +-
 kernel/watchdog.c            |  4 ++--
 kernel/workqueue.c           |  4 ++--
 15 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index c9d1cacb4395..9e880d221820 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -90,10 +90,10 @@ compiler optimizes the section accessing atomic_t variables.
 
 Properly aligned pointers, longs, ints, and chars (and unsigned
 equivalents) may be atomically loaded from and stored to in the same
-sense as described for atomic_read() and atomic_set().  The ACCESS_ONCE()
-macro should be used to prevent the compiler from using optimizations
-that might otherwise optimize accesses out of existence on the one hand,
-or that might create unsolicited accesses on the other.
+sense as described for atomic_read() and atomic_set().  The READ_ONCE()
+and WRITE_ONCE() macros should be used to prevent the compiler from using
+optimizations that might otherwise optimize accesses out of existence on
+the one hand, or that might create unsolicited accesses on the other.
 
 For example consider the following code:
 
@@ -112,7 +112,7 @@ the following:
 If you don't want the compiler to do this (and you probably don't), then
 you should use something like the following:
 
-       while (ACCESS_ONCE(a) < 0)
+       while (READ_ONCE(a) < 0)
                do_something();
 
 Alternatively, you could place a barrier() call in the loop.
@@ -141,7 +141,7 @@ of registers: reloading from variable a could save a flush to the
 stack and later reload.  To prevent the compiler from attacking your
 code in this manner, write the following:
 
-       tmp_a = ACCESS_ONCE(a);
+       tmp_a = READ_ONCE(a);
        do_something_with(tmp_a);
        do_something_else_with(tmp_a);
 
@@ -166,14 +166,14 @@ that expected b to never have the value 42 if a was zero.  To prevent
 the compiler from doing this, write something like:
 
        if (a)
-               ACCESS_ONCE(b) = 9;
+               WRITE_ONCE(b, 9);
        else
-               ACCESS_ONCE(b) = 42;
+               WRITE_ONCE(b, 42);
 
 Don't even -think- about doing this without proper use of memory barriers,
 locks, or atomic operations if variable a can change at runtime!
 
-*** WARNING: ACCESS_ONCE() DOES NOT IMPLY A BARRIER! ***
+*** WARNING: NEITHER READ_ONCE() NOR WRITE_ONCE() IMPLIES A BARRIER! ***
 
 Now, we move onto the atomic operation interfaces typically implemented with
 the help of assembly code.
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index defeaac0745f..26da22da6f63 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -219,7 +219,7 @@ static inline unsigned long __ffs64(u64 word)
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
-               old = ACCESS_ONCE(*ptr);                        \
+               old = READ_ONCE(*ptr);                          \
                new = (old & ~mask) | bits;                     \
        } while (cmpxchg(ptr, old, new) != old);                \
                                                                \
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fd4ca0b4fe0f..2381ace84815 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -157,7 +157,7 @@ static inline void init_llist_head(struct llist_head *list)
  */
 static inline bool llist_empty(const struct llist_head *head)
 {
-       return ACCESS_ONCE(head->first) == NULL;
+       return READ_ONCE(head->first) == NULL;
 }
 
 static inline struct llist_node *llist_next(struct llist_node *node)
diff --git a/kernel/acct.c b/kernel/acct.c
index 74963d192c5d..fca2d25c3708 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -144,7 +144,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns)
 again:
        smp_rmb();
        rcu_read_lock();
-       res = to_acct(ACCESS_ONCE(ns->bacct));
+       res = to_acct(READ_ONCE(ns->bacct));
        if (!res) {
                rcu_read_unlock();
                return NULL;
@@ -156,7 +156,7 @@ again:
        }
        rcu_read_unlock();
        mutex_lock(&res->lock);
-       if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
+       if (res != to_acct(READ_ONCE(ns->bacct))) {
                mutex_unlock(&res->lock);
                acct_put(res);
                goto again;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8c3b35f2a269..ca9060cdd6a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1119,7 +1119,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
 
 again:
        rcu_read_lock();
-       ctx = ACCESS_ONCE(event->ctx);
+       ctx = READ_ONCE(event->ctx);
        if (!atomic_inc_not_zero(&ctx->refcount)) {
                rcu_read_unlock();
                goto again;
@@ -4881,8 +4881,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                if (!rb)
                        goto aux_unlock;
 
-               aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
-               aux_size = ACCESS_ONCE(rb->user_page->aux_size);
+               aux_offset = READ_ONCE(rb->user_page->aux_offset);
+               aux_size = READ_ONCE(rb->user_page->aux_size);
 
                if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
                        goto aux_unlock;
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 60be55a64040..b39f4f809b62 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -346,7 +346,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
-               aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
+               aux_tail = READ_ONCE(rb->user_page->aux_tail);
                handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
diff --git a/kernel/exit.c b/kernel/exit.c
index fd90195667e1..d0d57d938366 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1294,7 +1294,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
         * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
         * can't confuse the checks below.
         */
-       int exit_state = ACCESS_ONCE(p->exit_state);
+       int exit_state = READ_ONCE(p->exit_state);
        int ret;
 
        if (unlikely(exit_state == EXIT_DEAD))
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9ff173dca1ae..fefa65d102b9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -63,7 +63,7 @@ static inline struct kthread *to_live_kthread(struct task_struct *k)
 
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
-       struct completion *vfork = ACCESS_ONCE(k->vfork_done);
+       struct completion *vfork = READ_ONCE(k->vfork_done);
        if (likely(vfork))
                return __to_kthread(vfork);
        return NULL;
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 53fa971d000d..b16168d6988b 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -29,7 +29,7 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
        struct callback_head *head;
 
        do {
-               head = ACCESS_ONCE(task->task_works);
+               head = READ_ONCE(task->task_works);
                if (unlikely(head == &work_exited))
                        return -ESRCH;
                work->next = head;
@@ -64,7 +64,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
         * we raced with task_work_run(), *pprev == NULL/exited.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
-       while ((work = ACCESS_ONCE(*pprev))) {
+       while ((work = READ_ONCE(*pprev))) {
                smp_read_barrier_depends();
                if (work->func != func)
                        pprev = &work->next;
@@ -95,7 +95,7 @@ void task_work_run(void)
                 * work_exited unless the list is empty.
                 */
                do {
-                       work = ACCESS_ONCE(task->task_works);
+                       work = READ_ONCE(task->task_works);
                        head = !work && (task->flags & PF_EXITING) ?
                                &work_exited : NULL;
                } while (cmpxchg(&task->task_works, work, head) != work);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95181e36891a..c629d0a392a9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2753,7 +2753,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
         * if it happened, we have to fail the write.
         */
        barrier();
-       if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+       if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
                local_dec(&cpu_buffer->committing);
                local_dec(&cpu_buffer->commits);
                return NULL;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3fff4adfd431..7a1a0c512a89 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1154,7 +1154,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr,
 
 static inline void *event_file_data(struct file *filp)
 {
-       return ACCESS_ONCE(file_inode(filp)->i_private);
+       return READ_ONCE(file_inode(filp)->i_private);
 }
 
 extern struct mutex event_mutex;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2a1abbaca10e..008a28ad652a 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -76,7 +76,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 {
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
-       int frame_size = ACCESS_ONCE(tracer_frame);
+       int frame_size = READ_ONCE(tracer_frame);
        int i, x;
 
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 9bafc211930c..2dc24bcc99f8 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -847,7 +847,7 @@ static bool new_idmap_permitted(const struct file *file,
 int proc_setgroups_show(struct seq_file *seq, void *v)
 {
        struct user_namespace *ns = seq->private;
-       unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+       unsigned long userns_flags = READ_ONCE(ns->flags);
 
        seq_printf(seq, "%s\n",
                   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9acb29f280ec..0dda0b005927 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -981,7 +981,7 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
                goto out;
        }
 
-       old = ACCESS_ONCE(watchdog_thresh);
+       old = READ_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
        if (err || !write)
@@ -990,7 +990,7 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
        /*
         * Update the sample period. Restore on failure.
         */
-       new = ACCESS_ONCE(watchdog_thresh);
+       new = READ_ONCE(watchdog_thresh);
        if (old == new)
                goto out;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..767aa8ae7492 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4550,7 +4550,7 @@ static void rebind_workers(struct worker_pool *pool)
                 * concurrency management.  Note that when or whether
                 * @worker clears REBOUND doesn't affect correctness.
                 *
-                * ACCESS_ONCE() is necessary because @worker->flags may be
+                * WRITE_ONCE() is necessary because @worker->flags may be
                 * tested without holding any lock in
                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
                 * fail incorrectly leading to premature concurrency
@@ -4559,7 +4559,7 @@ static void rebind_workers(struct worker_pool *pool)
                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
                worker_flags |= WORKER_REBOUND;
                worker_flags &= ~WORKER_UNBOUND;
-               ACCESS_ONCE(worker->flags) = worker_flags;
+               WRITE_ONCE(worker->flags, worker_flags);
        }
 
        spin_unlock_irq(&pool->lock);
-- 
2.8.1
