The branch stable/14 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=240ddb304ff327e831f75f8ae27f2e0220211b90

commit 240ddb304ff327e831f75f8ae27f2e0220211b90
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2024-07-07 23:26:57 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2024-07-21 08:50:29 +0000

    DMAR pagetable pages: handle ref_count special references
    
    (cherry picked from commit 7773023b72de19c6c9a47fd04ac955ec52773f0f)
---
 sys/x86/iommu/intel_idpgtbl.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index 82cac8bb2d39..deee50858dd2 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -48,6 +48,7 @@
 #include <sys/tree.h>
 #include <sys/uio.h>
 #include <sys/vmem.h>
+#include <sys/vmmeter.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>
@@ -391,7 +392,7 @@ retry:
                         * pte write and clean while the lock is
                         * dropped.
                         */
-                       m->ref_count++;
+                       vm_page_wire(m);
 
                        sfp = NULL;
                        ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
@@ -399,7 +400,7 @@ retry:
                        if (ptep == NULL) {
                                KASSERT(m->pindex != 0,
                                    ("loosing root page %p", domain));
-                               m->ref_count--;
+                               vm_page_unwire_noq(m);
                                iommu_pgfree(domain->pgtbl_obj, m->pindex,
                                    flags);
                                return (NULL);
@@ -407,8 +408,8 @@ retry:
                        dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
                            VM_PAGE_TO_PHYS(m));
                        dmar_flush_pte_to_ram(domain->dmar, ptep);
-                       sf_buf_page(sfp)->ref_count += 1;
-                       m->ref_count--;
+                       vm_page_wire(sf_buf_page(sfp));
+                       vm_page_unwire_noq(m);
                        iommu_unmap_pgtbl(sfp);
                        /* Only executed once. */
                        goto retry;
@@ -487,7 +488,7 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
                dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
                    (superpage ? DMAR_PTE_SP : 0));
                dmar_flush_pte_to_ram(domain->dmar, pte);
-               sf_buf_page(sf)->ref_count += 1;
+               vm_page_wire(sf_buf_page(sf));
        }
        if (sf != NULL)
                iommu_unmap_pgtbl(sf);
@@ -593,8 +594,7 @@ domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
                iommu_unmap_pgtbl(*sf);
                *sf = NULL;
        }
-       m->ref_count--;
-       if (m->ref_count != 0)
+       if (!vm_page_unwire_noq(m))
                return;
        KASSERT(lvl != 0,
            ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
@@ -710,7 +710,7 @@ domain_alloc_pgtbl(struct dmar_domain *domain)
        m = iommu_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
            IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
        /* No implicit free of the top level page table page. */
-       m->ref_count = 1;
+       vm_page_wire(m);
        DMAR_DOMAIN_PGUNLOCK(domain);
        DMAR_LOCK(domain->dmar);
        domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
@@ -742,8 +742,10 @@ domain_free_pgtbl(struct dmar_domain *domain)
 
        /* Obliterate ref_counts */
        VM_OBJECT_ASSERT_WLOCKED(obj);
-       for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
-               m->ref_count = 0;
+       for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m)) {
+               vm_page_clearref(m);
+               vm_wire_sub(1);
+       }
        VM_OBJECT_WUNLOCK(obj);
        vm_object_deallocate(obj);
 }
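
For context, here is a minimal sketch (not part of the commit) of the
reference pattern the diff switches to, assuming the stock vm_page(9) KPI.
The helpers pgtbl_hold_page() and pgtbl_drop_page() are hypothetical names
used purely for illustration:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Old pattern: m->ref_count++; */
static void
pgtbl_hold_page(vm_page_t m)
{
	/*
	 * vm_page_wire() takes a reference on the page and, unlike a
	 * bare ref_count++, also adjusts the system-wide wired-page
	 * accounting.
	 */
	vm_page_wire(m);
}

/* Old pattern: m->ref_count--; if (m->ref_count != 0) ... */
static bool
pgtbl_drop_page(vm_page_t m)
{
	/*
	 * vm_page_unwire_noq() releases one wiring without placing the
	 * page on a page queue, and returns true when the last wiring
	 * was dropped, which replaces the explicit ref_count == 0 test
	 * of the old code.
	 */
	return (vm_page_unwire_noq(m));
}

The vm_wire_sub(1) in the domain_free_pgtbl() loop appears to balance the
wired-page accounting that vm_page_wire() performed, since vm_page_clearref()
wipes the references without unwiring them one at a time; that is presumably
also why the diff adds the <sys/vmmeter.h> include.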
