On 21.06.19 02:06, Dan Williams wrote:
> David points out that the code mixes 'int' and 'unsigned long' for
> section numbers. Update the memory hotplug paths to use 'unsigned
> long' consistently for section numbers.
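> 
> For reference, pfn_to_section_nr() already deals in 'unsigned long';
> a sketch of the helper as defined in include/linux/mmzone.h:
> 
> 	static inline unsigned long pfn_to_section_nr(unsigned long pfn)
> 	{
> 		return pfn >> PFN_SECTION_SHIFT;
> 	}
> 
> so storing its result in an 'int' silently narrows the section number.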
> 
> Cc: Michal Hocko <mho...@suse.com>
> Cc: Oscar Salvador <osalva...@suse.de>
> Reported-by: David Hildenbrand <da...@redhat.com>
> Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
> ---
> Hi Andrew,
> 
> This patch belatedly addresses David's review feedback about moving
> over to 'unsigned long' for section numbers. Let me know if you want
> me to respin the full series, or if you'll just apply / fold this
> patch on top.
> 
>  mm/memory_hotplug.c |   10 +++++-----
>  mm/sparse.c         |    8 ++++----
>  2 files changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 4e8e65954f31..92bc44a73fc5 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -288,8 +288,8 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
>  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>               struct mhp_restrictions *restrictions)
>  {
> -     unsigned long i;
> -     int start_sec, end_sec, err;
> +     int err;
> +     unsigned long nr, start_sec, end_sec;
>       struct vmem_altmap *altmap = restrictions->altmap;
>  
>       if (altmap) {
> @@ -310,7 +310,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>  
>       start_sec = pfn_to_section_nr(pfn);
>       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
> -     for (i = start_sec; i <= end_sec; i++) {
> +     for (nr = start_sec; nr <= end_sec; nr++) {
>               unsigned long pfns;
>  
>               pfns = min(nr_pages, PAGES_PER_SECTION
> @@ -541,7 +541,7 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
>                   unsigned long nr_pages, struct vmem_altmap *altmap)
>  {
>       unsigned long map_offset = 0;
> -     int i, start_sec, end_sec;
> +     unsigned long nr, start_sec, end_sec;
>  
>       if (altmap)
>               map_offset = vmem_altmap_offset(altmap);
> @@ -553,7 +553,7 @@ void __remove_pages(struct zone *zone, unsigned long pfn,
>  
>       start_sec = pfn_to_section_nr(pfn);
>       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
> -     for (i = start_sec; i <= end_sec; i++) {
> +     for (nr = start_sec; nr <= end_sec; nr++) {
>               unsigned long pfns;
>  
>               cond_resched();
> diff --git a/mm/sparse.c b/mm/sparse.c
> index b77ca21a27a4..6c4eab2b2bb0 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -229,21 +229,21 @@ void subsection_mask_set(unsigned long *map, unsigned long pfn,
>  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
>  {
>       int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
> -     int i, start_sec = pfn_to_section_nr(pfn);
> +     unsigned long nr, start_sec = pfn_to_section_nr(pfn);
>  
>       if (!nr_pages)
>               return;
>  
> -     for (i = start_sec; i <= end_sec; i++) {
> +     for (nr = start_sec; nr <= end_sec; nr++) {
>               struct mem_section *ms;
>               unsigned long pfns;
>  
>               pfns = min(nr_pages, PAGES_PER_SECTION
>                               - (pfn & ~PAGE_SECTION_MASK));
> -             ms = __nr_to_section(i);
> +             ms = __nr_to_section(nr);
>               subsection_mask_set(ms->usage->subsection_map, pfn, pfns);
>  
> -             pr_debug("%s: sec: %d pfns: %ld set(%d, %d)\n", __func__, i,
> +             pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
>                               pfns, subsection_map_index(pfn),
>                               subsection_map_index(pfn + pfns - 1));
>  
> 

Thanks Dan!

Reviewed-by: David Hildenbrand <da...@redhat.com>

-- 

Thanks,

David / dhildenb