On 8 September 2016 at 04:57, Fenghua Yu <fenghua...@intel.com> wrote:
> diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
> index fcd0642..b25940a 100644
> --- a/arch/x86/kernel/cpu/intel_rdt.c
> +++ b/arch/x86/kernel/cpu/intel_rdt.c
> @@ -21,17 +21,94 @@
>   */
>  #include <linux/slab.h>
>  #include <linux/err.h>
> +#include <asm/intel_rdt.h>
> +
> +/*
> + * cctable maintains 1:1 mapping between CLOSid and cache bitmask.
> + */
> +static struct clos_cbm_table *cctable;
> +/*
> + * closid availability bit map.
> + */
> +unsigned long *closmap;
> +static DEFINE_MUTEX(rdtgroup_mutex);
> +
> +static inline void closid_get(u32 closid)
> +{
> +        struct clos_cbm_table *cct = &cctable[closid];
> +
> +        lockdep_assert_held(&rdtgroup_mutex);
> +
> +        cct->clos_refcnt++;
> +}
> +
> +static int closid_alloc(u32 *closid)
> +{
> +        u32 maxid;
> +        u32 id;
> +
> +        lockdep_assert_held(&rdtgroup_mutex);
> +
> +        maxid = boot_cpu_data.x86_cache_max_closid;
> +        id = find_first_zero_bit(closmap, maxid);
> +        if (id == maxid)
> +                return -ENOSPC;
> +
> +        set_bit(id, closmap);
> +        closid_get(id);
> +        *closid = id;
> +
> +        return 0;
> +}
> +
> +static inline void closid_free(u32 closid)
> +{
> +        clear_bit(closid, closmap);
> +        cctable[closid].cbm = 0;
> +}
> +
> +static void closid_put(u32 closid)
> +{
> +        struct clos_cbm_table *cct = &cctable[closid];
> +
> +        lockdep_assert_held(&rdtgroup_mutex);
> +        if (WARN_ON(!cct->clos_refcnt))
> +                return;
> +
> +        if (!--cct->clos_refcnt)
> +                closid_free(closid);
> +}
>
I would suggest a small change to the function names here: I think closid_free() and closid_put() should be swapped. As I understand it, under the current naming scheme the counterpart of closid_alloc() is closid_put(), and the counterpart of closid_get() is closid_free(). That is not how these names are usually paired; alloc and free normally go together, as do get and put.
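To make that concrete, here is a rough, untested sketch of the rename I have in mind; it is purely illustrative, with the function bodies taken unchanged from the patch and only the two names swapped:

/* Inner teardown, structurally pairing with closid_get(); was closid_free() in the patch. */
static inline void closid_put(u32 closid)
{
        clear_bit(closid, closmap);
        cctable[closid].cbm = 0;
}

/* Outer release, pairing with closid_alloc(); was closid_put() in the patch. */
static void closid_free(u32 closid)
{
        struct clos_cbm_table *cct = &cctable[closid];

        lockdep_assert_held(&rdtgroup_mutex);
        if (WARN_ON(!cct->clos_refcnt))
                return;

        if (!--cct->clos_refcnt)
                closid_put(closid);
}

With this, closid_alloc() (which calls closid_get()) is released by closid_free() (which calls closid_put()), so alloc/free form the outer pair and get/put the inner one.

--
Nilay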