This adds a parameter called keep_map_locked to vm_map_lookup() that
allows the function to return with the map still read-locked when the
lookup succeeds.

This is to prepare for fixing a bug in gsync where the map is locked
twice by mistake.

Co-Authored-By: Sergey Bugaev <buga...@gmail.com>
---
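Not part of the patch, only for illustration: a minimal sketch of how a
caller could use the new parameter.  The function example_probe and its
locals below are hypothetical.  With keep_map_locked == TRUE a successful
lookup returns with the (possibly substituted) map still read-locked, so
the caller must call vm_map_unlock_read() itself; on failure, or with
FALSE, the map is unlocked before returning, as before:

#include <vm/vm_map.h>
#include <vm/vm_object.h>

static kern_return_t
example_probe (vm_map_t map, vm_offset_t addr, vm_prot_t prot)
{
  vm_map_version_t ver;
  vm_object_t obj;
  vm_offset_t off;
  vm_prot_t rprot;
  boolean_t wired_p;
  kern_return_t kr;

  /* Ask vm_map_lookup to keep the map read-locked on success.  */
  kr = vm_map_lookup (&map, addr, prot, TRUE, &ver,
                      &obj, &off, &rprot, &wired_p);
  if (kr != KERN_SUCCESS)
    /* The lookup failed; vm_map_lookup already unlocked the map.  */
    return (kr);

  /* ... use obj/off while the map cannot change under us ...  */

  vm_object_unlock (obj);       /* the object is returned locked */
  vm_map_unlock_read (map);     /* we now own the read lock */
  return (KERN_SUCCESS);
}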
 i386/intel/read_fault.c | 4 ++--
 kern/gsync.c            | 2 +-
 vm/vm_fault.c           | 4 ++--
 vm/vm_map.c             | 9 ++++++---
 vm/vm_map.h             | 2 +-
 5 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
index 0b79e3d8..356145e1 100644
--- a/i386/intel/read_fault.c
+++ b/i386/intel/read_fault.c
@@ -61,7 +61,7 @@ intel_read_fault(
 	 *	Find the backing store object and offset into it
 	 *	to begin search.
 	 */
-	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+	result = vm_map_lookup(&map, vaddr, VM_PROT_READ, FALSE, &version,
 			       &object, &offset, &prot, &wired);
 	if (result != KERN_SUCCESS)
 	    return (result);
@@ -133,7 +133,7 @@ intel_read_fault(
 	    vm_offset_t	retry_offset;
 	    vm_prot_t	retry_prot;
 
-	    result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+	    result = vm_map_lookup(&map, vaddr, VM_PROT_READ, FALSE, &version,
 				   &retry_object, &retry_offset, &retry_prot,
 				   &wired);
 	    if (result != KERN_SUCCESS) {
diff --git a/kern/gsync.c b/kern/gsync.c
index e73a6cf0..19190349 100644
--- a/kern/gsync.c
+++ b/kern/gsync.c
@@ -134,7 +134,7 @@ probe_address (vm_map_t map, vm_offset_t addr,
   vm_prot_t rprot;
   boolean_t wired_p;
 
-  if (vm_map_lookup (&map, addr, prot, &ver,
+  if (vm_map_lookup (&map, addr, prot, FALSE, &ver,
       &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
     return (-1);
   else if ((rprot & prot) != prot)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index c6e28004..d99425a3 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -1213,7 +1213,7 @@ kern_return_t vm_fault(
 	 *	it to begin the search.
 	 */
 
-	if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
+	if ((kr = vm_map_lookup(&map, vaddr, fault_type, FALSE, &version,
 				&object, &offset,
 				&prot, &wired)) != KERN_SUCCESS) {
 		goto done;
@@ -1375,7 +1375,7 @@ kern_return_t vm_fault(
 		 *	take another fault.
 		 */
 		kr = vm_map_lookup(&map, vaddr,
-				fault_type & ~VM_PROT_WRITE, &version,
+				fault_type & ~VM_PROT_WRITE, FALSE, &version,
 				&retry_object, &retry_offset, &retry_prot,
 				&wired);
 
diff --git a/vm/vm_map.c b/vm/vm_map.c
index f221c532..47abea1b 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -4614,8 +4614,9 @@ vm_map_t vm_map_fork(vm_map_t old_map)
  *	In order to later verify this lookup, a "version"
  *	is returned.
  *
- *	The map should not be locked; it will not be
- *	locked on exit.  In order to guarantee the
+ *	The map should not be locked; it will be
+ *	unlocked on exit unless keep_map_locked is set and
+ *	the lookup succeeds.  In order to guarantee the
  *	existence of the returned object, it is returned
  *	locked.
  *
@@ -4628,6 +4629,7 @@ kern_return_t vm_map_lookup(
 	vm_map_t		*var_map,	/* IN/OUT */
 	vm_offset_t		vaddr,
 	vm_prot_t		fault_type,
+	boolean_t		keep_map_locked,
 
 	vm_map_version_t	*out_version,	/* OUT */
 	vm_object_t		*object,	/* OUT */
@@ -4649,7 +4651,8 @@ kern_return_t vm_map_lookup(
 
 #define	RETURN(why) \
 		{ \
-		vm_map_unlock_read(map); \
+		if (!(keep_map_locked && (why == KERN_SUCCESS))) \
+			vm_map_unlock_read(map); \
 		return(why); \
 		}
 
diff --git a/vm/vm_map.h b/vm/vm_map.h
index a4949e4e..7e25d9f4 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -412,7 +412,7 @@ extern kern_return_t vm_map_inherit(vm_map_t, vm_offset_t,
 		vm_offset_t, vm_inherit_t);
 
 /* Look up an address */
-extern kern_return_t vm_map_lookup(vm_map_t *, vm_offset_t, vm_prot_t,
+extern kern_return_t vm_map_lookup(vm_map_t *, vm_offset_t, vm_prot_t, boolean_t,
 		vm_map_version_t *, vm_object_t *, vm_offset_t *,
 		vm_prot_t *, boolean_t *);
 /* Find a map entry */
-- 
2.43.0