Hi Srikar,

On Thu, Apr 15, 2021 at 05:39:32PM +0530, Srikar Dronamraju wrote:
[..snip..]
> @@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
> 	add_cpu_to_smallcore_masks(cpu);
> 
> 	/* In CPU-hotplug path, hence use GFP_ATOMIC */
> -	alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
> +	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
> 	update_mask_by_l2(cpu, &mask);
> 
> 	if (has_coregroup_support())
> 		update_coregroup_mask(cpu, &mask);
> 
> +	if (chip_id == -1 || !ret) {
> +		cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
> +		goto out;
> +	}
> +
> +	if (shared_caches)
> +		submask_fn = cpu_l2_cache_mask;
> +
> +	/* Update core_mask with all the CPUs that are part of submask */
> +	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

If coregroups exist, we can add the CPUs of the coregroup to the
cpu_core_mask, thereby reducing the scope of the for_each_cpu() search
below. This will still cut down the time on bare-metal systems
supporting coregroups.

> +	/* Skip all CPUs already part of current CPU core mask */
> +	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
> +
> +	for_each_cpu(i, mask) {
> +		if (chip_id == cpu_to_chip_id(i)) {
> +			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
> +			cpumask_andnot(mask, mask, submask_fn(i));
> +		} else {
> +			cpumask_andnot(mask, mask, cpu_core_mask(i));
> +		}
> +	}
> +
> +out:
> 	free_cpumask_var(mask);
> }
> 
> -- 
> 2.25.1
> 