On Wed, Mar 20, 2019 at 03:53:01PM +0800, Baoquan He wrote:
> These functions are used to allocate/free the section memmap and have
> nothing to do with kmalloc/kfree during the handling. Rename them to remove
> the confusion.
> 
> Signed-off-by: Baoquan He <b...@redhat.com>

Acked-by: Mike Rapoport <r...@linux.ibm.com>

> ---
>  mm/sparse.c | 18 +++++++++---------
>  1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 054b99f74181..374206212d01 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -579,13 +579,13 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
>  #endif
> 
>  #ifdef CONFIG_SPARSEMEM_VMEMMAP
> -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
> +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
>               struct vmem_altmap *altmap)
>  {
>       /* This will make the necessary allocations eventually. */
>       return sparse_mem_map_populate(pnum, nid, altmap);
>  }
> -static void __kfree_section_memmap(struct page *memmap,
> +static void __free_section_memmap(struct page *memmap,
>               struct vmem_altmap *altmap)
>  {
>       unsigned long start = (unsigned long)memmap;
> @@ -603,7 +603,7 @@ static void free_map_bootmem(struct page *memmap)
>  }
>  #endif /* CONFIG_MEMORY_HOTREMOVE */
>  #else
> -static struct page *__kmalloc_section_memmap(void)
> +static struct page *__alloc_section_memmap(void)
>  {
>       struct page *page, *ret;
>       unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
> @@ -624,13 +624,13 @@ static struct page *__kmalloc_section_memmap(void)
>       return ret;
>  }
> 
> -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
> +static inline struct page *alloc_section_memmap(unsigned long pnum, int nid,
>               struct vmem_altmap *altmap)
>  {
> -     return __kmalloc_section_memmap();
> +     return __alloc_section_memmap();
>  }
> 
> -static void __kfree_section_memmap(struct page *memmap,
> +static void __free_section_memmap(struct page *memmap,
>               struct vmem_altmap *altmap)
>  {
>       if (is_vmalloc_addr(memmap))
> @@ -701,7 +701,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>       usemap = __kmalloc_section_usemap();
>       if (!usemap)
>               return -ENOMEM;
> -     memmap = kmalloc_section_memmap(section_nr, nid, altmap);
> +     memmap = alloc_section_memmap(section_nr, nid, altmap);
>       if (!memmap) {
>               kfree(usemap);
>               return -ENOMEM;
> @@ -726,7 +726,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
>  out:
>       if (ret < 0) {
>               kfree(usemap);
> -             __kfree_section_memmap(memmap, altmap);
> +             __free_section_memmap(memmap, altmap);
>       }
>       return ret;
>  }
> @@ -777,7 +777,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap,
>       if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
>               kfree(usemap);
>               if (memmap)
> -                     __kfree_section_memmap(memmap, altmap);
> +                     __free_section_memmap(memmap, altmap);
>               return;
>       }
> 
> -- 
> 2.17.2
> 

-- 
Sincerely yours,
Mike.
