__this_cpu_write() does not need to be protected by the spinlock, as the
per-cpu write is done with preemption disabled. Another reason to move
__this_cpu_write() outside of the spinlock: __percpu_counter_sum() is not
an accurate counter anyway.
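
For illustration only (not part of the patch): a minimal userspace sketch,
with made-up names (fake_counter, NR_FAKE_CPUS, fake_sum), of the window
this change opens. Between the unlock and the per-cpu clear, a concurrent
summer can see the batch both in the folded count and in the stale per-cpu
slot; that transient over-estimate is acceptable for an approximate counter.

	#include <stdio.h>

	#define NR_FAKE_CPUS	4
	#define BATCH		32

	struct fake_counter {
		long long count;	/* in the kernel: protected by fbc->lock */
		int pcpu[NR_FAKE_CPUS];	/* per-cpu slots, written locklessly */
	};

	static long long fake_sum(struct fake_counter *c)
	{
		long long sum = c->count;
		int cpu;

		for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
			sum += c->pcpu[cpu];
		return sum;
	}

	int main(void)
	{
		struct fake_counter c = { .count = 0, .pcpu = { 0 } };

		/* cpu 0 has accumulated a full batch */
		c.pcpu[0] = BATCH;

		/* fold step 1: count += batch (done under the lock) */
		c.count += c.pcpu[0];

		/* a reader here sees the batch twice: 64 instead of 32 */
		printf("sum in the window: %lld\n", fake_sum(&c));

		/* fold step 2: clear the per-cpu slot (now outside the lock) */
		c.pcpu[0] = 0;
		printf("sum after fold:   %lld\n", fake_sum(&c));
		return 0;
	}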

Signed-off-by: Fan Du <fan...@windriver.com>
---
 lib/percpu_counter.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ba6085d..1fc23a3 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -80,8 +80,8 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
        if (count >= batch || count <= -batch) {
                raw_spin_lock(&fbc->lock);
                fbc->count += count;
-               __this_cpu_write(*fbc->counters, 0);
                raw_spin_unlock(&fbc->lock);
+               __this_cpu_write(*fbc->counters, 0);
        } else {
                __this_cpu_write(*fbc->counters, count);
        }
-- 
1.7.1
