On 11/02/15 13:42, Jan Beulich wrote:
> Using atomic (LOCKed on x86) bitops for certain of the operations on
> cpumask_t is overkill when the variables aren't concurrently accessible
> (e.g. local function variables, or due to explicit locking). Introduce
> alternatives using non-atomic bitops and use them where appropriate.
>
> Note that this
> - adds a volatile qualifier to cpumask_test_and_{clear,set}_cpu()
>   (should have been there from the beginning, like is the case for
>   cpumask_{clear,set}_cpu())
> - replaces several cpumask_clear()+cpumask_set_cpu(, n) pairs by the
>   simpler cpumask_copy(, cpumask_of(n)) (or just cpumask_of(n) if we
>   can do without copying)
>
> Signed-off-by: Jan Beulich <jbeul...@suse.com>
> Acked-by: George Dunlap <george.dun...@eu.citrix.com>

Reviewed-by: Andrew Cooper <andrew.coop...@citrix.com>
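
For the archives, a quick sketch of the intended split (illustrative only:
the demo() function and its arguments below are invented, not part of the
patch, and this assumes the helpers the patch adds to xen/cpumask.h).
Masks that are strictly local to the current CPU - on the stack, or only
touched under a lock - can use the new double-underscore variants, while
masks another CPU may access concurrently keep the LOCKed forms:

    #include <xen/cpumask.h>

    static void demo(unsigned int this_cpu, unsigned int other_cpu)
    {
        cpumask_t mask;   /* stack-local, so no concurrent access */

        /* A cpumask_clear() + cpumask_set_cpu() pair collapses to a copy: */
        cpumask_copy(&mask, cpumask_of(this_cpu));

        /* Further updates to the local mask need no LOCK prefix: */
        __cpumask_set_cpu(other_cpu, &mask);
        if ( __cpumask_test_and_clear_cpu(this_cpu, &mask) )
            __cpumask_clear_cpu(other_cpu, &mask);

        /* A mask reachable from other CPUs (global or per-domain) must
         * still use cpumask_set_cpu()/cpumask_clear_cpu() etc. */
    }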

> ---
> v2: Make naming of new functions consistent with existing ones.
>
> --- a/xen/arch/x86/hpet.c
> +++ b/xen/arch/x86/hpet.c
> @@ -158,7 +158,7 @@ static void evt_do_broadcast(cpumask_t *
>  {
>      unsigned int cpu = smp_processor_id();
>  
> -    if ( cpumask_test_and_clear_cpu(cpu, mask) )
> +    if ( __cpumask_test_and_clear_cpu(cpu, mask) )
>          raise_softirq(TIMER_SOFTIRQ);
>  
>      cpuidle_wakeup_mwait(mask);
> @@ -197,7 +197,7 @@ again:
>              continue;
>  
>          if ( deadline <= now )
> -            cpumask_set_cpu(cpu, &mask);
> +            __cpumask_set_cpu(cpu, &mask);
>          else if ( deadline < next_event )
>              next_event = deadline;
>      }
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -1450,7 +1450,7 @@ void desc_guest_eoi(struct irq_desc *des
>          
>      cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
>  
> -    if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
> +    if ( __cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
>      {
>          __set_eoi_ready(desc);
>          spin_unlock(&desc->lock);
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -3216,7 +3216,7 @@ long do_mmuext_op(
>                  for_each_online_cpu(cpu)
>                      if ( !cpumask_intersects(&mask,
>                                               per_cpu(cpu_sibling_mask, cpu)) )
> -                        cpumask_set_cpu(cpu, &mask);
> +                        __cpumask_set_cpu(cpu, &mask);
>                  flush_mask(&mask, FLUSH_CACHE);
>              }
>              else
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -489,7 +489,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
>  
>              if ( !idletime )
>              {
> -                cpumask_clear_cpu(cpu, cpumap);
> +                __cpumask_clear_cpu(cpu, cpumap);
>                  continue;
>              }
>  
> --- a/xen/arch/x86/time.c
> +++ b/xen/arch/x86/time.c
> @@ -179,7 +179,7 @@ static void smp_send_timer_broadcast_ipi
>  
>      if ( cpumask_test_cpu(cpu, &mask) )
>      {
> -        cpumask_clear_cpu(cpu, &mask);
> +        __cpumask_clear_cpu(cpu, &mask);
>          raise_softirq(TIMER_SOFTIRQ);
>      }
>  
> --- a/xen/common/core_parking.c
> +++ b/xen/common/core_parking.c
> @@ -75,11 +75,10 @@ static unsigned int core_parking_perform
>              if ( core_weight < core_tmp )
>              {
>                  core_weight = core_tmp;
> -                cpumask_clear(&core_candidate_map);
> -                cpumask_set_cpu(cpu, &core_candidate_map);
> +                cpumask_copy(&core_candidate_map, cpumask_of(cpu));
>              }
>              else if ( core_weight == core_tmp )
> -                cpumask_set_cpu(cpu, &core_candidate_map);
> +                __cpumask_set_cpu(cpu, &core_candidate_map);
>          }
>  
>          for_each_cpu(cpu, &core_candidate_map)
> @@ -88,11 +87,10 @@ static unsigned int core_parking_perform
>              if ( sibling_weight < sibling_tmp )
>              {
>                  sibling_weight = sibling_tmp;
> -                cpumask_clear(&sibling_candidate_map);
> -                cpumask_set_cpu(cpu, &sibling_candidate_map);
> +                cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
>              }
>              else if ( sibling_weight == sibling_tmp )
> -                cpumask_set_cpu(cpu, &sibling_candidate_map);
> +                __cpumask_set_cpu(cpu, &sibling_candidate_map);
>          }
>  
>          cpu = cpumask_first(&sibling_candidate_map);
> @@ -135,11 +133,10 @@ static unsigned int core_parking_power(u
>              if ( core_weight > core_tmp )
>              {
>                  core_weight = core_tmp;
> -                cpumask_clear(&core_candidate_map);
> -                cpumask_set_cpu(cpu, &core_candidate_map);
> +                cpumask_copy(&core_candidate_map, cpumask_of(cpu));
>              }
>              else if ( core_weight == core_tmp )
> -                cpumask_set_cpu(cpu, &core_candidate_map);
> +                __cpumask_set_cpu(cpu, &core_candidate_map);
>          }
>  
>          for_each_cpu(cpu, &core_candidate_map)
> @@ -148,11 +145,10 @@ static unsigned int core_parking_power(u
>              if ( sibling_weight > sibling_tmp )
>              {
>                  sibling_weight = sibling_tmp;
> -                cpumask_clear(&sibling_candidate_map);
> -                cpumask_set_cpu(cpu, &sibling_candidate_map);
> +                cpumask_copy(&sibling_candidate_map, cpumask_of(cpu));
>              }
>              else if ( sibling_weight == sibling_tmp )
> -                cpumask_set_cpu(cpu, &sibling_candidate_map);
> +                __cpumask_set_cpu(cpu, &sibling_candidate_map);
>          }
>  
>          cpu = cpumask_first(&sibling_candidate_map);
> --- a/xen/common/cpu.c
> +++ b/xen/common/cpu.c
> @@ -192,7 +192,7 @@ int disable_nonboot_cpus(void)
>              break;
>          }
>  
> -        cpumask_set_cpu(cpu, &frozen_cpus);
> +        __cpumask_set_cpu(cpu, &frozen_cpus);
>      }
>  
>      BUG_ON(!error && (num_online_cpus() != 1));
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -1337,7 +1337,7 @@ static int __init find_non_smt(unsigned 
>          if ( cpumask_intersects(dest, per_cpu(cpu_sibling_mask, i)) )
>              continue;
>          cpu = cpumask_first(per_cpu(cpu_sibling_mask, i));
> -        cpumask_set_cpu(cpu, dest);
> +        __cpumask_set_cpu(cpu, dest);
>      }
>      return cpumask_weight(dest);
>  }
> @@ -1449,7 +1449,7 @@ void __init scrub_heap_pages(void)
>          cpus = find_non_smt(best_node, &node_cpus);
>          if ( cpus == 0 )
>          {
> -            cpumask_set_cpu(smp_processor_id(), &node_cpus);
> +            __cpumask_set_cpu(smp_processor_id(), &node_cpus);
>              cpus = 1;
>          }
>          /* We already have the node information from round #0. */
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -372,7 +372,7 @@ __runq_tickle(unsigned int cpu, struct c
>      {
>          if ( cur->pri != CSCHED_PRI_IDLE )
>              SCHED_STAT_CRANK(tickle_idlers_none);
> -        cpumask_set_cpu(cpu, &mask);
> +        __cpumask_set_cpu(cpu, &mask);
>      }
>      else if ( !idlers_empty )
>      {
> @@ -422,7 +422,7 @@ __runq_tickle(unsigned int cpu, struct c
>                  SCHED_VCPU_STAT_CRANK(cur, migrate_r);
>                  SCHED_STAT_CRANK(migrate_kicked_away);
>                  set_bit(_VPF_migrating, &cur->vcpu->pause_flags);
> -                cpumask_set_cpu(cpu, &mask);
> +                __cpumask_set_cpu(cpu, &mask);
>              }
>              else if ( !new_idlers_empty )
>              {
> @@ -432,7 +432,7 @@ __runq_tickle(unsigned int cpu, struct c
>                  {
>                      this_cpu(last_tickle_cpu) =
>                          cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
> -                    cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
> +                    __cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
>                  }
>                  else
>                      cpumask_or(&mask, &mask, &idle_mask);
> @@ -675,7 +675,7 @@ _csched_cpu_pick(const struct scheduler 
>           */
>          cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
>          if ( vc->processor == cpu && IS_RUNQ_IDLE(cpu) )
> -            cpumask_set_cpu(cpu, &idlers);
> +            __cpumask_set_cpu(cpu, &idlers);
>          cpumask_and(&cpus, &cpus, &idlers);
>  
>          /*
> @@ -692,7 +692,7 @@ _csched_cpu_pick(const struct scheduler 
>           */
>          if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
>              cpu = cpumask_cycle(cpu, &cpus);
> -        cpumask_clear_cpu(cpu, &cpus);
> +        __cpumask_clear_cpu(cpu, &cpus);
>  
>          while ( !cpumask_empty(&cpus) )
>          {
> @@ -1536,7 +1536,7 @@ csched_load_balance(struct csched_privat
>              /* Find out what the !idle are in this node */
>              cpumask_andnot(&workers, online, prv->idlers);
>              cpumask_and(&workers, &workers, &node_to_cpumask(peer_node));
> -            cpumask_clear_cpu(cpu, &workers);
> +            __cpumask_clear_cpu(cpu, &workers);
>  
>              peer_cpu = cpumask_first(&workers);
>              if ( peer_cpu >= nr_cpu_ids )
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -663,7 +663,7 @@ burn_budget(const struct scheduler *ops,
>   * lock is grabbed before calling this function
>   */
>  static struct rt_vcpu *
> -__runq_pick(const struct scheduler *ops, cpumask_t *mask)
> +__runq_pick(const struct scheduler *ops, const cpumask_t *mask)
>  {
>      struct list_head *runq = rt_runq(ops);
>      struct list_head *iter;
> @@ -780,10 +780,7 @@ rt_schedule(const struct scheduler *ops,
>      }
>      else
>      {
> -        cpumask_t cur_cpu;
> -        cpumask_clear(&cur_cpu);
> -        cpumask_set_cpu(cpu, &cur_cpu);
> -        snext = __runq_pick(ops, &cur_cpu);
> +        snext = __runq_pick(ops, cpumask_of(cpu));
>          if ( snext == NULL )
>              snext = rt_vcpu(idle_vcpu[cpu]);
>  
> --- a/xen/common/softirq.c
> +++ b/xen/common/softirq.c
> @@ -88,7 +88,7 @@ void cpumask_raise_softirq(const cpumask
>          if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
>               cpu != this_cpu &&
>               !arch_skip_send_event_check(cpu) )
> -            cpumask_set_cpu(cpu, raise_mask);
> +            __cpumask_set_cpu(cpu, raise_mask);
>  
>      if ( raise_mask == &send_mask )
>          smp_send_event_check_mask(raise_mask);
> @@ -106,7 +106,7 @@ void cpu_raise_softirq(unsigned int cpu,
>      if ( !per_cpu(batching, this_cpu) || in_irq() )
>          smp_send_event_check_cpu(cpu);
>      else
> -        set_bit(nr, &per_cpu(batch_mask, this_cpu));
> +        __cpumask_set_cpu(nr, &per_cpu(batch_mask, this_cpu));
>  }
>  
>  void cpu_raise_softirq_batch_begin(void)
> @@ -122,7 +122,7 @@ void cpu_raise_softirq_batch_finish(void
>      ASSERT(per_cpu(batching, this_cpu));
>      for_each_cpu ( cpu, mask )
>          if ( !softirq_pending(cpu) )
> -            cpumask_clear_cpu(cpu, mask);
> +            __cpumask_clear_cpu(cpu, mask);
>      smp_send_event_check_mask(mask);
>      cpumask_clear(mask);
>      --per_cpu(batching, this_cpu);
> --- a/xen/include/xen/cpumask.h
> +++ b/xen/include/xen/cpumask.h
> @@ -103,11 +103,21 @@ static inline void cpumask_set_cpu(int c
>       set_bit(cpumask_check(cpu), dstp->bits);
>  }
>  
> +static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
> +{
> +     __set_bit(cpumask_check(cpu), dstp->bits);
> +}
> +
>  static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
>  {
>       clear_bit(cpumask_check(cpu), dstp->bits);
>  }
>  
> +static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
> +{
> +     __clear_bit(cpumask_check(cpu), dstp->bits);
> +}
> +
>  static inline void cpumask_setall(cpumask_t *dstp)
>  {
>       bitmap_fill(dstp->bits, nr_cpumask_bits);
> @@ -122,16 +132,26 @@ static inline void cpumask_clear(cpumask
>  #define cpumask_test_cpu(cpu, cpumask) \
>       test_bit(cpumask_check(cpu), (cpumask)->bits)
>  
> -static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
> +static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
>  {
>       return test_and_set_bit(cpumask_check(cpu), addr->bits);
>  }
>  
> -static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
> +static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
> +{
> +     return __test_and_set_bit(cpumask_check(cpu), addr->bits);
> +}
> +
> +static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
>  {
>       return test_and_clear_bit(cpumask_check(cpu), addr->bits);
>  }
>  
> +static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
> +{
> +     return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
> +}
> +
>  static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
>                              const cpumask_t *src2p)
>  {

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
