From: Alex Shi <al...@kernel.org>

A further step toward replacing pgtable_t with ptdesc, and a preparation
for converting vmf.prealloc_pte to ptdesc as well.

Signed-off-by: Alex Shi <al...@kernel.org>
Cc: linux-ker...@vger.kernel.org
Cc: linux...@kvack.org
Cc: linux-fsde...@vger.kernel.org
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Matthew Wilcox <wi...@infradead.org>
---
 mm/filemap.c  | 2 +-
 mm/internal.h | 2 +-
 mm/memory.c   | 8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index d62150418b91..3708ef71182e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3453,7 +3453,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
        }
 
        if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
-               pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
+               pmd_install(mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
 
        return false;
 }
diff --git a/mm/internal.h b/mm/internal.h
index 7a3bcc6d95e7..e4bc64d5176a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -320,7 +320,7 @@ void folio_activate(struct folio *folio);
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                   struct vm_area_struct *start_vma, unsigned long floor,
                   unsigned long ceiling, bool mm_wr_locked);
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, struct ptdesc **pte);
 
 struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
diff --git a/mm/memory.c b/mm/memory.c
index cbed8824059f..79685600d23f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -418,7 +418,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
        } while (vma);
 }
 
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, struct ptdesc **pte)
 {
        spinlock_t *ptl = pmd_lock(mm, pmd);
 
@@ -438,7 +438,7 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
                 * smp_rmb() barriers in page table walking code.
                 */
                smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
-               pmd_populate(mm, pmd, (struct ptdesc *)(*pte));
+               pmd_populate(mm, pmd, *pte);
                *pte = NULL;
        }
        spin_unlock(ptl);
@@ -450,7 +450,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
        if (!ptdesc)
                return -ENOMEM;
 
-       pmd_install(mm, pmd, (pgtable_t *)&ptdesc);
+       pmd_install(mm, pmd, &ptdesc);
        if (ptdesc)
                pte_free(mm, ptdesc);
        return 0;
@@ -4868,7 +4868,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                }
 
                if (vmf->prealloc_pte)
-                       pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
+                       pmd_install(vma->vm_mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
                else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
                        return VM_FAULT_OOM;
        }
-- 
2.43.0

Reply via email to