On Tue, Apr 18, 2017 at 01:11:10PM +0200, Thomas Gleixner wrote:
> +static struct tmigr_group *tmigr_get_group(unsigned int node, unsigned int lvl)
> +{
> +     struct tmigr_group *group;
> +
> +     /* Try to attach to an existing group first */
> +     list_for_each_entry(group, &tmigr_level_list[lvl], list) {
> +             /*
> +              * If @lvl is below the cross numa node level, check
> +              * whether this group belongs to the same numa node.
> +              */
> +             if (lvl < tmigr_crossnode_level && group->numa_node != node)
> +                     continue;
> +             /* If the group has capacity, use it */
> +             if (group->num_childs < tmigr_childs_per_group) {
> +                     group->num_childs++;
> +                     return group;
> +             }

This would result in SMT siblings not sharing groups on regular Intel
systems, right? Since the siblings get enumerated last, the groups their
co-threads joined can already be full by the time they are placed.
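
For concreteness, a toy model of that filling order (all numbers are made
up; this is not the patch's code, just an illustration of the concern),
assuming tmigr_childs_per_group == 2 and a 4-core/8-thread box where
cpu0..3 are the first threads and cpu4..7 their siblings:

	#include <stdio.h>

	int main(void)
	{
		int childs_per_group = 2;
		/* assumed sibling map: cpuN pairs with cpuN+4 */
		int sibling_of[8] = { 4, 5, 6, 7, 0, 1, 2, 3 };
		int group_of[8];

		/* groups fill first-come-first-served in enumeration order */
		for (int cpu = 0; cpu < 8; cpu++)
			group_of[cpu] = cpu / childs_per_group;

		for (int cpu = 0; cpu < 4; cpu++)
			printf("cpu%d -> group %d, sibling cpu%d -> group %d\n",
			       cpu, group_of[cpu], sibling_of[cpu],
			       group_of[sibling_of[cpu]]);
		return 0;
	}

Every first thread ends up in a different level-0 group than its sibling.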

> +     }
> +     /* Allocate and set up a new group */
> +     group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
> +     if (!group)
> +             return ERR_PTR(-ENOMEM);
> +
> +     if (!zalloc_cpumask_var_node(&group->cpus, GFP_KERNEL, node)) {
> +             kfree(group);
> +             return ERR_PTR(-ENOMEM);
> +     }

So if you place that cpumask last, you can do:

        group = kzalloc_node(sizeof(*group) + cpumask_size(),
                             GFP_KERNEL, node);
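
For concreteness, a minimal sketch of that single-allocation variant,
assuming the cpumask becomes the trailing member of struct tmigr_group
(members other than the ones visible in the quoted code are omitted; this
is illustrative, not the actual patch):

	struct tmigr_group {
		struct list_head	list;
		unsigned int		numa_node;
		unsigned int		num_childs;
		/* ... other members ... */
		struct cpumask		cpus[];		/* must stay last */
	};

	/* One allocation covers the group and its embedded mask */
	group = kzalloc_node(sizeof(*group) + cpumask_size(), GFP_KERNEL, node);
	if (!group)
		return ERR_PTR(-ENOMEM);

Existing users of group->cpus should keep working since the trailing array
still evaluates to a struct cpumask *, and the separate
zalloc_cpumask_var_node() call with its error path goes away.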

> +     tmigr_init_group(group, lvl, node);
> +     /* Setup successful. Add it to the hierarchy */
> +     list_add(&group->list, &tmigr_level_list[lvl]);
> +     return group;
> +}
