On 19/01/15 15:58, Jan Beulich wrote:
> --- a/xen/common/core_parking.c
> +++ b/xen/common/core_parking.c
> @@ -75,11 +75,10 @@ static unsigned int core_parking_perform
>          if ( core_weight < core_tmp )
>          {
>              core_weight = core_tmp;
> -            cpumask_clear(&core_candidate_map);
> -            cpumask_set_cpu(cpu, &core_candidate_map);
> +            cpumask_copy(&core_candidate_map, cpumask_of(cpu));
It is probably worth mentioning changes like this in the commit message, as
they are slightly more than just a simple removal of the lock prefix (a short
illustration follows below the quoted hunks).

>          }
>          else if ( core_weight == core_tmp )
> -            cpumask_set_cpu(cpu, &core_candidate_map);
> +            __cpumask_set_cpu(cpu, &core_candidate_map);
>      }
>
>      for_each_cpu(cpu, &core_candidate_map)
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -663,7 +663,7 @@ burn_budget(const struct scheduler *ops,
>   * lock is grabbed before calling this function
>   */
>  static struct rt_vcpu *
> -__runq_pick(const struct scheduler *ops, cpumask_t *mask)
> +__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
>  {
>      struct list_head *runq = rt_runq(ops);
>      struct list_head *iter;
> @@ -780,10 +780,7 @@ rt_schedule(const struct scheduler *ops,
>      }
>      else
>      {
> -        cpumask_t cur_cpu;
> -        cpumask_clear(&cur_cpu);
> -        cpumask_set_cpu(cpu, &cur_cpu);
> -        snext = __runq_pick(ops, &cur_cpu);
> +        snext = __runq_pick(ops, cpumask_of(cpu));
>          if ( snext == NULL )
>              snext = rt_vcpu(idle_vcpu[cpu]);
>
> --- a/xen/common/softirq.c
> +++ b/xen/common/softirq.c
> @@ -88,7 +88,7 @@ void cpumask_raise_softirq(const cpumask
>          if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
>               cpu != this_cpu &&
>               !arch_skip_send_event_check(cpu) )
> -            cpumask_set_cpu(cpu, raise_mask);
> +            __cpumask_set_cpu(cpu, raise_mask);
>
>      if ( raise_mask == &send_mask )
>          smp_send_event_check_mask(raise_mask);
> @@ -106,7 +106,7 @@ void cpu_raise_softirq(unsigned int cpu,
>      if ( !per_cpu(batching, this_cpu) || in_irq() )
>          smp_send_event_check_cpu(cpu);
>      else
> -        set_bit(nr, &per_cpu(batch_mask, this_cpu));
> +        __cpumask_set_cpu(nr, &per_cpu(batch_mask, this_cpu));
>  }
>
>  void cpu_raise_softirq_batch_begin(void)
> @@ -122,7 +122,7 @@ void cpu_raise_softirq_batch_finish(void
>      ASSERT(per_cpu(batching, this_cpu));
>      for_each_cpu ( cpu, mask )
>          if ( !softirq_pending(cpu) )
> -            cpumask_clear_cpu(cpu, mask);
> +            __cpumask_clr_cpu(cpu, mask);
>      smp_send_event_check_mask(mask);
>      cpumask_clear(mask);
>      --per_cpu(batching, this_cpu);
> --- a/xen/include/xen/cpumask.h
> +++ b/xen/include/xen/cpumask.h
> @@ -103,11 +103,21 @@ static inline void cpumask_set_cpu(int c
>      set_bit(cpumask_check(cpu), dstp->bits);
>  }
>
> +static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
> +{
> +    __set_bit(cpumask_check(cpu), dstp->bits);
> +}
> +
>  static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
>  {
>      clear_bit(cpumask_check(cpu), dstp->bits);
>  }
>
> +static inline void __cpumask_clr_cpu(int cpu, cpumask_t *dstp)
> +{
> +    __clear_bit(cpumask_check(cpu), dstp->bits);
> +}
> +

While I can appreciate the want for a shorter function name, I feel that
consistency with its locked alternative is more important.
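To spell out the "more than a simple removal of the lock prefix" point for
anyone following along: the locked helpers are safe against concurrent
updates of the same mask, whereas the new double-underscore ones are a plain
read-modify-write and rely on the caller guaranteeing exclusivity (an
on-stack mask, or one serialised by a lock).  A minimal standalone sketch of
the distinction -- not Xen's implementation, using made-up demo_* names and
GCC's __atomic builtins:

#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Atomic variant: conceptually what the lock-prefixed set_bit() behind
 * cpumask_set_cpu() provides; safe even if other CPUs update the word. */
static inline void demo_atomic_set_bit(unsigned int nr, unsigned long *addr)
{
    __atomic_fetch_or(&addr[nr / DEMO_BITS_PER_LONG],
                      1UL << (nr % DEMO_BITS_PER_LONG), __ATOMIC_SEQ_CST);
}

/* Non-atomic variant: a plain read-modify-write, as __set_bit() would be;
 * only correct when nothing else can touch the mask concurrently. */
static inline void demo_plain_set_bit(unsigned int nr, unsigned long *addr)
{
    addr[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}

int main(void)
{
    unsigned long mask[2] = { 0, 0 };

    demo_atomic_set_bit(3, mask);   /* would also be safe on a shared mask */
    demo_plain_set_bit(65, mask);   /* caller must guarantee exclusivity */
    printf("%#lx %#lx\n", mask[0], mask[1]);
    return 0;
}

Which of the two is legitimate at each call site is exactly the information
that would be useful to have recorded in the commit message.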
>  static inline void cpumask_setall(cpumask_t *dstp)
>  {
>      bitmap_fill(dstp->bits, nr_cpumask_bits);
> @@ -122,16 +132,26 @@ static inline void cpumask_clear(cpumask
>  #define cpumask_test_cpu(cpu, cpumask) \
>      test_bit(cpumask_check(cpu), (cpumask)->bits)
>
> -static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
> +static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
>  {
>      return test_and_set_bit(cpumask_check(cpu), addr->bits);
>  }
>
> -static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
> +static inline int __cpumask_tst_set_cpu(int cpu, cpumask_t *addr)
> +{
> +    return __test_and_set_bit(cpumask_check(cpu), addr->bits);
> +}
> +
> +static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)

This introduction of volatile also needs mentioning in the commit message,
but I would agree that it should be here (a short standalone illustration
follows after the remaining quoted hunk).

~Andrew

>  {
>      return test_and_clear_bit(cpumask_check(cpu), addr->bits);
>  }
>
> +static inline int __cpumask_tst_clr_cpu(int cpu, cpumask_t *addr)
> +{
> +    return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
> +}
> +
>  static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
>                                 const cpumask_t *src2p)
>  {
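As a footnote to the volatile remark above: a tiny self-contained example,
again with hypothetical demo_* names rather than the real cpumask API, of why
the locked test-and-set/clear prototypes want a volatile-qualified mask
pointer while the unlocked variants can keep a plain one:

/* Hypothetical demo types/helpers; illustration only. */
typedef struct { unsigned long bits[2]; } demo_cpumask_t;

/* A mask updated asynchronously by other CPUs is naturally volatile. */
static volatile demo_cpumask_t pending_mask;

/* Locked helper: volatile-qualified mask, like the patched
 * cpumask_test_and_set_cpu()/cpumask_test_and_clear_cpu() prototypes. */
static int demo_test_and_set(unsigned int nr, volatile demo_cpumask_t *m)
{
    unsigned long bit = 1UL << nr;

    return (__atomic_fetch_or(&m->bits[0], bit, __ATOMIC_SEQ_CST) & bit) != 0;
}

/* Unlocked helper: plain pointer, plain read-modify-write, for masks that
 * are private to the caller. */
static int demo_plain_test_and_set(unsigned int nr, demo_cpumask_t *m)
{
    unsigned long bit = 1UL << nr;
    int old = (m->bits[0] & bit) != 0;

    m->bits[0] |= bit;
    return old;
}

int main(void)
{
    demo_cpumask_t local = { { 0, 0 } };

    demo_test_and_set(1, &pending_mask);   /* prototype accepts volatile */
    demo_plain_test_and_set(2, &local);    /* mask is caller-private */
    /*
     * demo_plain_test_and_set(3, &pending_mask); would discard the volatile
     * qualifier and draw a compiler warning, which is why the qualifier
     * belongs on the locked prototypes.
     */
    return 0;
}

With a plain-pointer prototype, passing a genuinely shared mask would either
need a cast or silently discard the qualifier, so keeping volatile on the
locked variants, as the patch does, looks like the right split.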