The branch stable/14 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=e9251d68b206d4ea4be4413c17e008c76dd49e79

commit e9251d68b206d4ea4be4413c17e008c76dd49e79
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2023-12-21 00:12:37 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2023-12-29 00:33:21 +0000

    vm_iommu_map()/unmap(): stop transiently wiring already wired pages
    
    (cherry picked from commit 671a00491d7ac9d6663cdc597ff8c13024eda00d)
---
 sys/amd64/vmm/vmm.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index db81f63c9614..d44adc86252d 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1044,9 +1044,10 @@ vm_iommu_map(struct vm *vm)
 {
        vm_paddr_t gpa, hpa;
        struct mem_map *mm;
-       void *vp, *cookie;
        int i;
 
+       sx_assert(&vm->mem_segs_lock, SX_LOCKED);
+
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (!sysmem_mapping(vm, mm))
@@ -1060,13 +1061,24 @@ vm_iommu_map(struct vm *vm)
                mm->flags |= VM_MEMMAP_F_IOMMU;
 
                for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
-                       vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
-                           VM_PROT_WRITE, &cookie);
-                       KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
-                           vm_name(vm), gpa));
-                       vm_gpa_release(cookie);
+                       hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
+
+                       /*
+                        * All mappings in the vmm vmspace must be
+                        * present since they are managed by vmm in this way.
+                        * Because we are in pass-through mode, the
+                        * mappings must also be wired.  This implies
+                        * that all pages must be mapped and wired,
+                        * allowing the use of pmap_extract() and avoiding
+                        * the need for vm_gpa_hold_global().
+                        *
+                        * This could change if/when we start
+                        * supporting page faults on IOMMU maps.
+                        */
+                       KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
+                           ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
+                           vm, (uintmax_t)gpa, (uintmax_t)hpa));
 
-                       hpa = DMAP_TO_PHYS((uintptr_t)vp);
                        iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
                }
        }
@@ -1079,9 +1091,10 @@ vm_iommu_unmap(struct vm *vm)
 {
        vm_paddr_t gpa;
        struct mem_map *mm;
-       void *vp, *cookie;
        int i;
 
+       sx_assert(&vm->mem_segs_lock, SX_LOCKED);
+
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (!sysmem_mapping(vm, mm))
@@ -1095,12 +1108,10 @@ vm_iommu_unmap(struct vm *vm)
                    mm->gpa, mm->len, mm->flags));
 
                for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
-                       vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
-                           VM_PROT_WRITE, &cookie);
-                       KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
-                           vm_name(vm), gpa));
-                       vm_gpa_release(cookie);
-
+                       KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
+                           vmspace_pmap(vm->vmspace), gpa))),
+                           ("vm_iommu_unmap: vm %p gpa %jx not wired",
+                           vm, (uintmax_t)gpa));
                        iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
                }
        }
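
For reference, a minimal sketch (not part of the commit) of the old and
new patterns side by side, assuming the stable/14 vmm internals in which
vm_gpa_hold_global() wires the page through vm_fault_quick_hold_pages()
and vm_gpa_release() drops that wiring again via vm_page_unwire():

    /*
     * Old pattern: fault in and transiently wire a page that vmm has
     * already wired, solely to learn its host physical address.
     */
    vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
        &cookie);                       /* wire count: N -> N + 1 */
    hpa = DMAP_TO_PHYS((uintptr_t)vp);  /* direct-map VA -> PA */
    vm_gpa_release(cookie);             /* wire count: N + 1 -> N */

    /*
     * New pattern: every sysmem page of a pass-through VM is known to
     * be mapped and wired by vmm itself, so a plain pmap lookup
     * suffices and the wire count is never touched.
     */
    hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);

The KASSERTs added in the diff encode the invariant that makes this
shortcut safe; as the new comment notes, the code would need to change
if IOMMU maps ever start supporting page faults.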
