DAX was the only user that created pmd_devmap and pud_devmap entries,
but it no longer does so: DAX pages are now refcounted normally, and
pXd_trans_huge() returns true for them. Checking pXd_devmap() in
addition to pXd_trans_huge() is therefore redundant, and because
pXd_devmap() will now always be false, it can be removed without
changing behaviour.

Signed-off-by: Alistair Popple <apop...@nvidia.com>
---
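A note for reviewers, outside the commit message proper: the
equivalence argument above can be sketched as a standalone userspace C
model. The pmd_model type and its helpers below are hypothetical
stand-ins for the real pmd_t accessors, not kernel code; the sketch
only demonstrates that once no code path sets the devmap bit, the
combined "trans_huge || devmap" test reduces to the trans_huge test
alone.

  /* Standalone sketch; pmd_model and its helpers are simplified
   * stand-ins for pmd_t and its accessors, not the kernel's. */
  #include <assert.h>
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define FLAG_TRANS_HUGE (1u << 0)
  #define FLAG_DEVMAP     (1u << 1)   /* nothing sets this any more */

  typedef struct { unsigned int flags; } pmd_model;

  static bool model_trans_huge(pmd_model p) { return p.flags & FLAG_TRANS_HUGE; }
  static bool model_devmap(pmd_model p)     { return p.flags & FLAG_DEVMAP; }

  int main(void)
  {
          /* The only entries that can occur now that DAX no longer
           * creates devmap mappings: none/normal, or trans-huge
           * (which now covers refcounted DAX pages too). */
          pmd_model possible[] = { { 0 }, { FLAG_TRANS_HUGE } };

          for (size_t i = 0; i < sizeof(possible) / sizeof(possible[0]); i++) {
                  pmd_model p = possible[i];
                  assert((model_trans_huge(p) || model_devmap(p)) ==
                         model_trans_huge(p));
          }
          printf("devmap check is dead: combined test == trans_huge test\n");
          return 0;
  }

The same argument applies unchanged at the PUD level.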
 fs/dax.c                   |  5 ++---
 include/linux/huge_mm.h    | 10 ++++------
 include/linux/pgtable.h    |  2 +-
 mm/hmm.c                   |  4 ++--
 mm/huge_memory.c           | 31 +++++++++----------------------
 mm/mapping_dirty_helpers.c |  4 ++--
 mm/memory.c                | 15 ++++++---------
 mm/migrate_device.c        |  2 +-
 mm/mprotect.c              |  2 +-
 mm/mremap.c                |  5 ++---
 mm/page_vma_mapped.c       |  5 ++---
 mm/pagewalk.c              |  8 +++-----
 mm/pgtable-generic.c       |  7 +++----
 mm/userfaultfd.c           |  4 ++--
 mm/vmscan.c                |  3 ---
 15 files changed, 40 insertions(+), 67 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index cf96f3d..e26fb6b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1932,7 +1932,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
         * the PTE we need to set up.  If so just return and the fault will be
         * retried.
         */
-       if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
+       if (pmd_trans_huge(*vmf->pmd)) {
                ret = VM_FAULT_NOPAGE;
                goto unlock_entry;
        }
@@ -2053,8 +2053,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
         * the PMD we need to set up.  If so just return and the fault will be
         * retried.
         */
-       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
-                       !pmd_devmap(*vmf->pmd)) {
+       if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd)) {
                ret = 0;
                goto unlock_entry;
        }
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 22bc207..f427053 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -370,8 +370,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 #define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
-               if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
-                                       || pmd_devmap(*____pmd))        \
+               if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd))  \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)
@@ -397,8 +396,7 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
 #define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
-               if (pud_trans_huge(*____pud)                            \
-                                       || pud_devmap(*____pud))        \
+               if (pud_trans_huge(*____pud))                           \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)
 
@@ -421,7 +419,7 @@ static inline int is_swap_pmd(pmd_t pmd)
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
 {
-       if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+       if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
@@ -429,7 +427,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
 {
-       if (pud_trans_huge(*pud) || pud_devmap(*pud))
+       if (pud_trans_huge(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 94d267d..00e4a06 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1635,7 +1635,7 @@ static inline int pud_trans_unstable(pud_t *pud)
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
        pud_t pudval = READ_ONCE(*pud);
 
-       if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+       if (pud_none(pudval) || pud_trans_huge(pudval))
                return 1;
        if (unlikely(pud_bad(pudval))) {
                pud_clear_bad(pud);
diff --git a/mm/hmm.c b/mm/hmm.c
index 9e43008..5037f98 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -348,7 +348,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }
 
-       if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
+       if (pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
@@ -359,7 +359,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
                 * values.
                 */
                pmd = pmdp_get_lockless(pmdp);
-               if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
+               if (!pmd_trans_huge(pmd))
                        goto again;
 
                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a87f7a2..1962b8e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1400,10 +1400,7 @@ static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        }
 
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
-       if (pfn_t_devmap(pfn))
-               entry = pmd_mkdevmap(entry);
-       else
-               entry = pmd_mkspecial(entry);
+       entry = pmd_mkspecial(entry);
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
@@ -1443,8 +1440,6 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1537,10 +1532,7 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        }
 
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
-       if (pfn_t_devmap(pfn))
-               entry = pud_mkdevmap(entry);
-       else
-               entry = pud_mkspecial(entry);
+       entry = pud_mkspecial(entry);
        if (write) {
                entry = pud_mkyoung(pud_mkdirty(entry));
                entry = maybe_pud_mkwrite(entry, vma);
@@ -1571,8 +1563,6 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
-                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -1799,7 +1789,7 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        ret = -EAGAIN;
        pud = *src_pud;
-       if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+       if (unlikely(!pud_trans_huge(pud)))
                goto out_unlock;
 
        /*
@@ -2653,8 +2643,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
        spinlock_t *ptl;
        ptl = pmd_lock(vma->vm_mm, pmd);
-       if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
-                       pmd_devmap(*pmd)))
+       if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)))
                return ptl;
        spin_unlock(ptl);
        return NULL;
@@ -2671,7 +2660,7 @@ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
        spinlock_t *ptl;
 
        ptl = pud_lock(vma->vm_mm, pud);
-       if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+       if (likely(pud_trans_huge(*pud)))
                return ptl;
        spin_unlock(ptl);
        return NULL;
@@ -2723,7 +2712,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
        VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
        VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
-       VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
+       VM_BUG_ON(!pud_trans_huge(*pud));
 
        count_vm_event(THP_SPLIT_PUD);
 
@@ -2756,7 +2745,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                                (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
        ptl = pud_lock(vma->vm_mm, pud);
-       if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
+       if (unlikely(!pud_trans_huge(*pud)))
                goto out;
        __split_huge_pud_locked(vma, pud, range.start);
 
@@ -2829,8 +2818,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
        VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
        VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
-       VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
-                               && !pmd_devmap(*pmd));
+       VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd));
 
        count_vm_event(THP_SPLIT_PMD);
 
@@ -3047,8 +3035,7 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
         * require a folio to check the PMD against. Otherwise, there
         * is a risk of replacing the wrong folio.
         */
-       if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-           is_pmd_migration_entry(*pmd)) {
+       if (pmd_trans_huge(*pmd) || is_pmd_migration_entry(*pmd)) {
                if (folio && folio != pmd_folio(*pmd))
                        return;
                __split_huge_pmd_locked(vma, pmd, address, freeze);
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index 2f8829b..208b428 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -129,7 +129,7 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
        pmd_t pmdval = pmdp_get_lockless(pmd);
 
        /* Do not split a huge pmd, present or migrated */
-       if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
+       if (pmd_trans_huge(pmdval)) {
                WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
                walk->action = ACTION_CONTINUE;
        }
@@ -152,7 +152,7 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
        pud_t pudval = READ_ONCE(*pud);
 
        /* Do not split a huge pud */
-       if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
+       if (pud_trans_huge(pudval)) {
                WARN_ON(pud_write(pudval) || pud_dirty(pudval));
                walk->action = ACTION_CONTINUE;
        }
diff --git a/mm/memory.c b/mm/memory.c
index a527c70..296ef2c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -682,8 +682,6 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       if (pmd_devmap(pmd))
-               return NULL;
        if (is_huge_zero_pmd(pmd))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
@@ -1226,8 +1224,7 @@ copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
-                       || pmd_devmap(*src_pmd)) {
+               if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
                        int err;
                        VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
                        err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -1263,7 +1260,7 @@ copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
        src_pud = pud_offset(src_p4d, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+               if (pud_trans_huge(*src_pud)) {
                        int err;
 
                        VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
@@ -1788,7 +1785,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
+               if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
@@ -1830,7 +1827,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+               if (pud_trans_huge(*pud)) {
                        if (next - addr != HPAGE_PUD_SIZE) {
                                mmap_assert_locked(tlb->mm);
                                split_huge_pud(vma, pud, addr);
@@ -6000,7 +5997,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                pud_t orig_pud = *vmf.pud;
 
                barrier();
-               if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
+               if (pud_trans_huge(orig_pud)) {
 
                        /*
                         * TODO once we support anonymous PUDs: NUMA case and
@@ -6041,7 +6038,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                                pmd_migration_entry_wait(mm, vmf.pmd);
                        return 0;
                }
-               if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
+               if (pmd_trans_huge(vmf.orig_pmd)) {
                        if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
                                return do_huge_pmd_numa_page(&vmf);
 
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6771893..49c3984 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -599,7 +599,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
        pmdp = pmd_alloc(mm, pudp, addr);
        if (!pmdp)
                goto abort;
-       if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
+       if (pmd_trans_huge(*pmdp))
                goto abort;
        if (pte_alloc(mm, pmdp))
                goto abort;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 1444878..4aec7b2 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -376,7 +376,7 @@ static inline long change_pmd_range(struct mmu_gather *tlb,
                        goto next;
 
                _pmd = pmdp_get_lockless(pmd);
-               if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
+               if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd)) {
                        if ((next - addr != HPAGE_PMD_SIZE) ||
                            pgtable_split_needed(vma, cp_flags)) {
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
diff --git a/mm/mremap.c b/mm/mremap.c
index cff7f55..e9cfb0b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -633,7 +633,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
                if (!new_pud)
                        break;
-               if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+               if (pud_trans_huge(*old_pud)) {
                        if (extent == HPAGE_PUD_SIZE) {
                                move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
                                               old_pud, new_pud, need_rmap_locks);
@@ -655,8 +655,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                if (!new_pmd)
                        break;
 again:
-               if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
-                   pmd_devmap(*old_pmd)) {
+               if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE &&
                            move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
                                           old_pmd, new_pmd, need_rmap_locks))
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 32679be..614150d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -241,8 +241,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                 */
                pmde = pmdp_get_lockless(pvmw->pmd);
 
-               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
-                   (pmd_present(pmde) && pmd_devmap(pmde))) {
+               if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
@@ -257,7 +256,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                                        return not_found(pvmw);
                                return true;
                        }
-                       if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
+                       if (likely(pmd_trans_huge(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0dfb9c2..cca170f 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -143,8 +143,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                         * We are ONLY installing, so avoid unnecessarily
                         * splitting a present huge page.
                         */
-                       if (pmd_present(*pmd) &&
-                           (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+                       if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
                                continue;
                }
 
@@ -210,8 +209,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                         * We are ONLY installing, so avoid unnecessarily
                         * splitting a present huge page.
                         */
-                       if (pud_present(*pud) &&
-                           (pud_trans_huge(*pud) || pud_devmap(*pud)))
+                       if (pud_present(*pud) && pud_trans_huge(*pud))
                                continue;
                }
 
@@ -872,7 +870,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
                 * TODO: FW_MIGRATION support for PUD migration entries
                 * once there are relevant users.
                 */
-               if (!pud_present(pud) || pud_devmap(pud) || pud_special(pud)) {
+               if (!pud_present(pud) || pud_special(pud)) {
                        spin_unlock(ptl);
                        goto not_found;
                } else if (!pud_leaf(pud)) {
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5a882f2..567e2d0 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -139,8 +139,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
-                          !pmd_devmap(*pmdp));
+       VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
@@ -153,7 +152,7 @@ pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
        pud_t pud;
 
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
-       VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
+       VM_BUG_ON(!pud_trans_huge(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
@@ -293,7 +292,7 @@ pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
                *pmdvalp = pmdval;
        if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
                goto nomap;
-       if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
+       if (unlikely(pmd_trans_huge(pmdval)))
                goto nomap;
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index cc6dc18..38e88b1 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -794,8 +794,8 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
                 * (This includes the case where the PMD used to be THP and
                 * changed back to none after __pte_alloc().)
                 */
-               if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
-                            pmd_devmap(dst_pmdval))) {
+               if (unlikely(!pmd_present(dst_pmdval) ||
+                               pmd_trans_huge(dst_pmdval))) {
                        err = -EEXIST;
                        break;
                }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7b4b7f..463d045 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3402,9 +3402,6 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned
        if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
                return -1;
 
-       if (WARN_ON_ONCE(pmd_devmap(pmd)))
-               return -1;
-
        if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm))
                return -1;
 
-- 
git-series 0.9.1
