atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once the counter reaches zero, further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and can be exploitable.

The variable numa_group.refcount is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keesc...@chromium.org>
Reviewed-by: David Windsor <dwind...@gmail.com>
Reviewed-by: Hans Liljestrand <ishkam...@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshet...@intel.com>
---
 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 27a2241..b4f0eb2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1050,7 +1050,7 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
 struct numa_group {
-       atomic_t refcount;
+       refcount_t refcount;
 
        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
@@ -1119,7 +1119,7 @@ static unsigned int task_scan_start(struct task_struct *p)
                unsigned long shared = group_faults_shared(ng);
                unsigned long private = group_faults_priv(ng);
 
-               period *= atomic_read(&ng->refcount);
+               period *= refcount_read(&ng->refcount);
                period *= shared + 1;
                period /= private + shared + 1;
        }
@@ -1142,7 +1142,7 @@ static unsigned int task_scan_max(struct task_struct *p)
                unsigned long private = group_faults_priv(ng);
                unsigned long period = smax;
 
-               period *= atomic_read(&ng->refcount);
+               period *= refcount_read(&ng->refcount);
                period *= shared + 1;
                period /= private + shared + 1;
 
@@ -2227,12 +2227,12 @@ static void task_numa_placement(struct task_struct *p)
 
 static inline int get_numa_group(struct numa_group *grp)
 {
-       return atomic_inc_not_zero(&grp->refcount);
+       return refcount_inc_not_zero(&grp->refcount);
 }
 
 static inline void put_numa_group(struct numa_group *grp)
 {
-       if (atomic_dec_and_test(&grp->refcount))
+       if (refcount_dec_and_test(&grp->refcount))
                kfree_rcu(grp, rcu);
 }
 
@@ -2253,7 +2253,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                if (!grp)
                        return;
 
-               atomic_set(&grp->refcount, 1);
+               refcount_set(&grp->refcount, 1);
                grp->active_nodes = 1;
                grp->max_faults_cpu = 0;
                spin_lock_init(&grp->lock);
-- 
2.7.4

Reply via email to