On Mon, Mar 25, 2024 at 10:56:42PM +0800, Baoquan He wrote:
> Now that nobody calls set_dma_reserve() to set a value for dma_reserve,
> remove set_dma_reserve(), the global variable dma_reserve, and the code
> using it.
> 
> Signed-off-by: Baoquan He <b...@redhat.com>

Reviewed-by: Mike Rapoport (IBM) <r...@kernel.org>

> ---
>  include/linux/mm.h |  1 -
>  mm/mm_init.c       | 23 -----------------------
>  2 files changed, 24 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0436b919f1c7..ad19350e1538 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3210,7 +3210,6 @@ static inline int early_pfn_to_nid(unsigned long pfn)
>  extern int __meminit early_pfn_to_nid(unsigned long pfn);
>  #endif
>  
> -extern void set_dma_reserve(unsigned long new_dma_reserve);
>  extern void mem_init(void);
>  extern void __init mmap_init(void);
>  
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 549e76af8f82..153fb2dc666f 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -226,7 +226,6 @@ static unsigned long required_movablecore_percent __initdata;
>  
>  static unsigned long nr_kernel_pages __initdata;
>  static unsigned long nr_all_pages __initdata;
> -static unsigned long dma_reserve __initdata;
>  
>  static bool deferred_struct_pages __meminitdata;
>  
> @@ -1583,12 +1582,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
>                                       zone_names[j], memmap_pages, freesize);
>               }
>  
> -             /* Account for reserved pages */
> -             if (j == 0 && freesize > dma_reserve) {
> -                     freesize -= dma_reserve;
> -                     pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
> -             }
> -
>               if (!is_highmem_idx(j))
>                       nr_kernel_pages += freesize;
>               /* Charge for highmem memmap if there are enough kernel pages */
> @@ -2547,22 +2540,6 @@ void *__init alloc_large_system_hash(const char *tablename,
>       return table;
>  }
>  
> -/**
> - * set_dma_reserve - set the specified number of pages reserved in the first zone
> - * @new_dma_reserve: The number of pages to mark reserved
> - *
> - * The per-cpu batchsize and zone watermarks are determined by managed_pages.
> - * In the DMA zone, a significant percentage may be consumed by kernel image
> - * and other unfreeable allocations which can skew the watermarks badly. This
> - * function may optionally be used to account for unfreeable pages in the
> - * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
> - * smaller per-cpu batchsize.
> - */
> -void __init set_dma_reserve(unsigned long new_dma_reserve)
> -{
> -     dma_reserve = new_dma_reserve;
> -}
> -
>  void __init memblock_free_pages(struct page *page, unsigned long pfn,
>                                                       unsigned int order)
>  {
> -- 
> 2.41.0
> 
> 

-- 
Sincerely yours,
Mike.