Move vma_alloc_folio_noprof() from an inline in gfp.h (for !NUMA)
and mempolicy.c (for NUMA) to page_alloc.c.  The declaration is
moved outside the #ifdef CONFIG_NUMA block so both configs use
the same real function.

On NUMA, it calls the mempolicy allocation path as before.
On !NUMA, it applies the VM_DROPPABLE -> __GFP_NOWARN adjustment
and then calls folio_alloc_noprof() directly.

This prepares for a subsequent patch that will thread user_addr
through the allocator: having vma_alloc_folio in page_alloc.c
means user_addr can be passed to the internal allocation path
without changing public API signatures or duplicating plumbing
in both gfp.h and mempolicy.c.

No functional change on NUMA.  On !NUMA, VM_DROPPABLE VMAs now get
__GFP_NOWARN as well (previously this was applied only on NUMA),
making the two configurations behave consistently.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 include/linux/gfp.h |  9 ++-------
 mm/mempolicy.c      | 17 -----------------
 mm/page_alloc.c     | 28 ++++++++++++++++++++++++++++
 3 files changed, 30 insertions(+), 24 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 51ef13ed756e..7ccbda35b9ad 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -318,13 +318,13 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
 
 #define  alloc_pages_node(...)                 alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
 
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr);
 #ifdef CONFIG_NUMA
 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
                struct mempolicy *mpol, pgoff_t ilx, int nid);
-struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr);
 #else
 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
@@ -339,11 +339,6 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
 {
        return folio_alloc_noprof(gfp, order);
 }
-static inline struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
-               struct vm_area_struct *vma, unsigned long addr)
-{
-       return folio_alloc_noprof(gfp, order);
-}
 #endif
 
 #define alloc_pages(...)                       alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e5175f1c767..f0f85c89da82 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2524,23 +2524,6 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
  *
  * Return: The folio on success or NULL if allocation fails.
  */
-struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
-               unsigned long addr)
-{
-       struct mempolicy *pol;
-       pgoff_t ilx;
-       struct folio *folio;
-
-       if (vma->vm_flags & VM_DROPPABLE)
-               gfp |= __GFP_NOWARN;
-
-       pol = get_vma_policy(vma, addr, order, &ilx);
-       folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
-       mpol_cond_put(pol);
-       return folio;
-}
-EXPORT_SYMBOL(vma_alloc_folio_noprof);
-
 struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order)
 {
        struct mempolicy *pol = &default_policy;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d4b6f1a554e..0e6ec7310087 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5297,6 +5297,34 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
 }
 EXPORT_SYMBOL(__folio_alloc_noprof);
 
+#ifdef CONFIG_NUMA
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr)
+{
+       struct mempolicy *pol;
+       pgoff_t ilx;
+       struct folio *folio;
+
+       if (vma->vm_flags & VM_DROPPABLE)
+               gfp |= __GFP_NOWARN;
+
+       pol = get_vma_policy(vma, addr, order, &ilx);
+       folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
+       mpol_cond_put(pol);
+       return folio;
+}
+#else
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
+               struct vm_area_struct *vma, unsigned long addr)
+{
+       if (vma->vm_flags & VM_DROPPABLE)
+               gfp |= __GFP_NOWARN;
+
+       return folio_alloc_noprof(gfp, order);
+}
+#endif
+EXPORT_SYMBOL(vma_alloc_folio_noprof);
+
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
-- 
MST


Reply via email to