Release percpu memory after finishing the switch to atomic mode,
but only if PERCPU_REF_ALLOW_REINIT isn't set.
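
A minimal, hypothetical usage sketch (my_obj and its helpers are
illustrative, not part of this patch): a ref initialized without
PERCPU_REF_ALLOW_REINIT can never be reactivated with
percpu_ref_reinit(), so its percpu counters can be freed as soon as
the switch to atomic mode completes (e.g. after percpu_ref_kill()),
instead of waiting for percpu_ref_exit():

  #include <linux/percpu-refcount.h>
  #include <linux/slab.h>

  struct my_obj {
          struct percpu_ref ref;
  };

  static void my_obj_release(struct percpu_ref *ref)
  {
          struct my_obj *obj = container_of(ref, struct my_obj, ref);

          /*
           * The percpu counters were already freed when the ref
           * switched to atomic mode, so this only cleans up the rest.
           */
          percpu_ref_exit(&obj->ref);
          kfree(obj);
  }

  static struct my_obj *my_obj_create(void)
  {
          struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

          if (!obj)
                  return NULL;

          /* no PERCPU_REF_ALLOW_REINIT: percpu_ref_reinit() won't be used */
          if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
                  kfree(obj);
                  return NULL;
          }
          return obj;
  }

  static void my_obj_destroy(struct my_obj *obj)
  {
          /*
           * Kills the ref and switches it to atomic mode; with this
           * patch the percpu memory is released right after the switch
           * finishes.  percpu_ref_kill() drops the initial reference,
           * so my_obj_release() runs once all other references are gone.
           */
          percpu_ref_kill(&obj->ref);
  }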

Signed-off-by: Roman Gushchin <[email protected]>
---
 include/linux/percpu-refcount.h |  1 +
 lib/percpu-refcount.c           | 13 +++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 0f0240af8520..7aef0abc194a 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -102,6 +102,7 @@ struct percpu_ref {
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
+       bool                    allow_reinit:1;
        struct rcu_head         rcu;
 };
 
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index da54318d3b55..47f0aeb136c4 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -69,11 +69,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                return -ENOMEM;
 
        ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+       ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 
-       if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+       if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-       else
+               ref->allow_reinit = true;
+       } else {
                start_count += PERCPU_COUNT_BIAS;
+       }
 
        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -119,6 +122,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
        ref->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);
 
+       if (!ref->allow_reinit)
+               percpu_ref_exit(ref);
+
        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
 }
@@ -194,6 +200,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;
 
+       if (WARN_ON_ONCE(!ref->allow_reinit))
+               return;
+
        atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
        /*
-- 
2.20.1
