Hi Peter, I walked through the reference counting, and it seems good to me (though it did take a few passes to fully digest the invariants for the fat cookie stuff).
> +unsigned long sched_core_alloc_cookie(unsigned int type) > { > struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL); > if (!ck) > return 0; > - refcount_set(&ck->refcnt, 1); > + WARN_ON_ONCE(type > GROUP_COOKIE); > + sched_core_init_cookie(ck, type); > sched_core_get(); > > - return (unsigned long)ck; > + return (unsigned long)ck | type; > } This feels like it needs to be stronger than a WARN_ON_ONCE; otherwise we could create a corrupted address that we would later try to kfree(). Also, for my own edification, why will the bottom two bits here always be 0? > -unsigned long sched_core_alloc_cookie(void) > +static inline void *cookie_ptr(unsigned long cookie) > +{ > + return (void *)(cookie & ~3UL); > +} > + > +static inline int cookie_type(unsigned long cookie) > +{ > + return cookie & 3; > +} s/3/FAT_COOKIE/ > +#define FAT_COOKIE 0x03 Move to sched.h to group with TASK/GROUP_COOKIE? > +static unsigned long __sched_core_fat_cookie(struct task_struct *p, > + void **spare_fat, > + unsigned long cookie) > +{ This function looks good to me, but could use some more comments about the pre/post-condition assumptions. I.e. cookie already has a get() associated with it, and the caller is expected to kfree() the spare_fat. > + raw_spin_lock_irqsave(&fat_lock, flags); > + n = rb_find_add(&fat->node, &fat_root, fat_cmp); > + raw_spin_unlock_irqrestore(&fat_lock, flags); > + > + if (n) { > + sched_core_put_fat(fat); > + fat = node_2_fat(n); This put() doesn't seem strictly necessary; the caller will be unconditionally freeing the spare_fat. Keep anyway for completeness, but add a comment?