Hi all,

Please ignore this patch, since it includes a build error.
I have resent a fixed patch as v2.

Sorry for my carelessness.

On 2016/10/11 21:03, zijun_hu wrote:
> From: zijun_hu <zijun...@htc.com>
> 
> As shown by pcpu_build_alloc_info(), the number of units within a percpu
> group is derived by rounding the number of CPUs within the group up to
> the @upa boundary, so the number of units differs from the number of CPUs
> whenever the latter is not aligned to @upa. However, pcpu_page_first_chunk()
> uses BUG_ON() to assert that the two numbers are equal, so the BUG_ON() may
> falsely trigger a panic.
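[ For illustration only: a minimal user-space sketch of the mismatch
  described above. The CPU and @upa values are made up, and the roundup()
  macro here is just a stand-in for the kernel helper; none of this is
  part of the patch. ]

#include <stdio.h>

/* stand-in for the kernel's roundup(); positive integers only */
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	int nr_cpus = 6;	/* hypothetical num_possible_cpus() */
	int upa = 4;		/* hypothetical units per allocation */
	int nr_units = roundup(nr_cpus, upa);	/* 8, not 6 */

	/*
	 * A check of "nr_units != nr_cpus" fires here even though the
	 * allocation info is valid; rounding nr_cpus up to @upa before
	 * comparing, as described below, avoids the false mismatch.
	 */
	printf("cpus=%d upa=%d units=%d\n", nr_cpus, upa, nr_units);
	return 0;
}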
> 
> To fix this issue, round the number of CPUs up to @upa before comparing it
> with the number of units, and replace the BUG_ON() with a warning plus an
> error return so the system stays alive as far as possible.
> 
> Signed-off-by: zijun_hu <zijun...@htc.com>
> ---
>  mm/percpu.c | 16 ++++++++++++----
>  1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/percpu.c b/mm/percpu.c
> index 32e2d8d128c1..c2f0d9734d8c 100644
> --- a/mm/percpu.c
> +++ b/mm/percpu.c
> @@ -2095,6 +2095,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
>       size_t pages_size;
>       struct page **pages;
>       int unit, i, j, rc;
> +     int upa;
> +     int nr_g0_units;
>  
>       snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
>  
> @@ -2102,7 +2104,12 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
>       if (IS_ERR(ai))
>               return PTR_ERR(ai);
>       BUG_ON(ai->nr_groups != 1);
> -     BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
> +     upa = ai->alloc_size/ai->unit_size;
> +     g0_nr_units = roundup(num_possible_cpus(), upa);
> +     if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
> +             pcpu_free_alloc_info(ai);
> +             return -EINVAL;
> +     }
>  
>       unit_pages = ai->unit_size >> PAGE_SHIFT;
>  
> @@ -2113,21 +2120,22 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
>  
>       /* allocate pages */
>       j = 0;
> -     for (unit = 0; unit < num_possible_cpus(); unit++)
> +     for (unit = 0; unit < num_possible_cpus(); unit++) {
> +             unsigned int cpu = ai->groups[0].cpu_map[unit];
>               for (i = 0; i < unit_pages; i++) {
> -                     unsigned int cpu = ai->groups[0].cpu_map[unit];
>                       void *ptr;
>  
>                       ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
>                       if (!ptr) {
>                               pr_warn("failed to allocate %s page for cpu%u\n",
> -                                     psize_str, cpu);
> +                                             psize_str, cpu);
>                               goto enomem;
>                       }
>                       /* kmemleak tracks the percpu allocations separately */
>                       kmemleak_free(ptr);
>                       pages[j++] = virt_to_page(ptr);
>               }
> +     }
>  
>       /* allocate vm area, map the pages and copy static data */
>       vm.flags = VM_ALLOC;
> 

