On 21.08.19 17:40, David Hildenbrand wrote:
> No longer in use, let's drop it. We no longer access the zone of
> possibly never onlined memory (and therefore don't read garbage in
> these scenarios).
> 
> Cc: Catalin Marinas <[email protected]>
> Cc: Will Deacon <[email protected]>
> Cc: Tony Luck <[email protected]>
> Cc: Fenghua Yu <[email protected]>
> Cc: Benjamin Herrenschmidt <[email protected]>
> Cc: Paul Mackerras <[email protected]>
> Cc: Michael Ellerman <[email protected]>
> Cc: Heiko Carstens <[email protected]>
> Cc: Vasily Gorbik <[email protected]>
> Cc: Christian Borntraeger <[email protected]>
> Cc: Yoshinori Sato <[email protected]>
> Cc: Rich Felker <[email protected]>
> Cc: Dave Hansen <[email protected]>
> Cc: Andy Lutomirski <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Borislav Petkov <[email protected]>
> Cc: "H. Peter Anvin" <[email protected]>
> Cc: [email protected]
> Cc: Andrew Morton <[email protected]>
> Cc: Mark Rutland <[email protected]>
> Cc: Steve Capper <[email protected]>
> Cc: Mike Rapoport <[email protected]>
> Cc: Anshuman Khandual <[email protected]>
> Cc: Yu Zhao <[email protected]>
> Cc: Jun Yao <[email protected]>
> Cc: Robin Murphy <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: Oscar Salvador <[email protected]>
> Cc: "Matthew Wilcox (Oracle)" <[email protected]>
> Cc: Christophe Leroy <[email protected]>
> Cc: "Aneesh Kumar K.V" <[email protected]>
> Cc: Pavel Tatashin <[email protected]>
> Cc: Gerald Schaefer <[email protected]>
> Cc: Halil Pasic <[email protected]>
> Cc: Tom Lendacky <[email protected]>
> Cc: Greg Kroah-Hartman <[email protected]>
> Cc: Masahiro Yamada <[email protected]>
> Cc: Dan Williams <[email protected]>
> Cc: Wei Yang <[email protected]>
> Cc: Qian Cai <[email protected]>
> Cc: Jason Gunthorpe <[email protected]>
> Cc: Logan Gunthorpe <[email protected]>
> Cc: Ira Weiny <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: David Hildenbrand <[email protected]>
> ---
>  arch/arm64/mm/mmu.c            | 4 +---
>  arch/ia64/mm/init.c            | 4 +---
>  arch/powerpc/mm/mem.c          | 3 +--
>  arch/s390/mm/init.c            | 4 +---
>  arch/sh/mm/init.c              | 4 +---
>  arch/x86/mm/init_32.c          | 4 +---
>  arch/x86/mm/init_64.c          | 4 +---
>  include/linux/memory_hotplug.h | 4 ++--
>  mm/memory_hotplug.c            | 6 +++---
>  mm/memremap.c                  | 3 +--
>  10 files changed, 13 insertions(+), 27 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index e67bab4d613e..b3843aff12bf 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -1080,7 +1080,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct zone *zone;
>  
>       /*
>        * FIXME: Cleanup page tables (also in arch_add_memory() in case
> @@ -1089,7 +1088,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>        * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
>        * unlocked yet.
>        */
> -     zone = page_zone(pfn_to_page(start_pfn));
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>  }
>  #endif
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index bf9df2625bc8..a6dd80a2c939 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct zone *zone;
>  
> -     zone = page_zone(pfn_to_page(start_pfn));
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>  }
>  #endif
> diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
> index 9191a66b3bc5..7351c44c435a 100644
> --- a/arch/powerpc/mm/mem.c
> +++ b/arch/powerpc/mm/mem.c
> @@ -130,10 +130,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 
> size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
>       int ret;
>  
> -     __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>  
>       /* Remove htab bolted mappings for this section of memory */
>       start = (unsigned long)__va(start);
> diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
> index 20340a03ad90..6f13eb66e375 100644
> --- a/arch/s390/mm/init.c
> +++ b/arch/s390/mm/init.c
> @@ -296,10 +296,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct zone *zone;
>  
> -     zone = page_zone(pfn_to_page(start_pfn));
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>       vmem_remove_mapping(start, size);
>  }
>  #endif /* CONFIG_MEMORY_HOTPLUG */
> diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
> index dfdbaa50946e..d1b1ff2be17a 100644
> --- a/arch/sh/mm/init.c
> +++ b/arch/sh/mm/init.c
> @@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>  {
>       unsigned long start_pfn = PFN_DOWN(start);
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct zone *zone;
>  
> -     zone = page_zone(pfn_to_page(start_pfn));
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>  }
>  #endif /* CONFIG_MEMORY_HOTPLUG */
> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
> index 4068abb9427f..9d036be27aaa 100644
> --- a/arch/x86/mm/init_32.c
> +++ b/arch/x86/mm/init_32.c
> @@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct zone *zone;
>  
> -     zone = page_zone(pfn_to_page(start_pfn));
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>  }
>  #endif
>  
> diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
> index a6b5c653727b..b8541d77452c 100644
> --- a/arch/x86/mm/init_64.c
> +++ b/arch/x86/mm/init_64.c
> @@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 
> size,
>  {
>       unsigned long start_pfn = start >> PAGE_SHIFT;
>       unsigned long nr_pages = size >> PAGE_SHIFT;
> -     struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
> -     struct zone *zone = page_zone(page);
>  
> -     __remove_pages(zone, start_pfn, nr_pages, altmap);
> +     __remove_pages(start_pfn, nr_pages, altmap);
>       kernel_physical_mapping_remove(start, start + size);
>  }
>  #endif /* CONFIG_MEMORY_HOTPLUG */
> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
> index f46ea71b4ffd..f75d9483864f 100644
> --- a/include/linux/memory_hotplug.h
> +++ b/include/linux/memory_hotplug.h
> @@ -125,8 +125,8 @@ static inline bool movable_node_is_enabled(void)
>  
>  extern void arch_remove_memory(int nid, u64 start, u64 size,
>                              struct vmem_altmap *altmap);
> -extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
> -                        unsigned long nr_pages, struct vmem_altmap *altmap);
> +extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
> +                        struct vmem_altmap *altmap);
>  
>  /* reasonably generic interface to expand the physical pages */
>  extern int __add_pages(int nid, unsigned long start_pfn, unsigned long 
> nr_pages,
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index e88c96cf9d77..7a9719a762fe 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -532,7 +532,6 @@ static void __remove_section(unsigned long pfn, unsigned 
> long nr_pages,
>  
>  /**
>   * __remove_pages() - remove sections of pages from a zone
> - * @zone: zone from which pages need to be removed
>   * @pfn: starting pageframe (must be aligned to start of a section)
>   * @nr_pages: number of pages to remove (must be multiple of section size)
>   * @altmap: alternative device page map or %NULL if default memmap is used
> @@ -542,12 +541,13 @@ static void __remove_section(unsigned long pfn, 
> unsigned long nr_pages,
>   * sure that pages are marked reserved and zones are adjust properly by
>   * calling offline_pages().
>   */
> -void __remove_pages(struct zone *zone, unsigned long pfn,
> -                 unsigned long nr_pages, struct vmem_altmap *altmap)
> +void __remove_pages(unsigned long pfn, unsigned long nr_pages,
> +                 struct vmem_altmap *altmap)
>  {
>       const unsigned long end_pfn = pfn + nr_pages;
>       unsigned long cur_nr_pages;
>       unsigned long map_offset = 0;
> +     struct zone *zone;
>  
>       if (check_pfn_span(pfn, nr_pages, "remove"))
>               return;
> diff --git a/mm/memremap.c b/mm/memremap.c
> index 8a394552b5bd..7e34f42e5f5a 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -138,8 +138,7 @@ static void devm_memremap_pages_release(void *data)
>       mem_hotplug_begin();
>       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
>               pfn = PHYS_PFN(res->start);
> -             __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
> -                              PHYS_PFN(resource_size(res)), NULL);
> +             __remove_pages(pfn, PHYS_PFN(resource_size(res)), NULL);
>       } else {
>               arch_remove_memory(nid, res->start, resource_size(res),
>                               pgmap_altmap(pgmap));
> 

Thinking about it, as outlined in the cover letter, passing the nid
instead of the zone here and using the nid to further reduce the number
of zones to process might make sense. But let's wait for feedback first.

-- 

Thanks,

David / dhildenb

Reply via email to