Looks much nicer indeed, applied, thanks!

Damien Zammit, on Thu, 22 Feb 2024 08:24:39 +0000, wrote:
> This prevents a deadlock in smp where a read lock on the map
> is taken in gsync and then the map is locked again inside
> vm_map_lookup() but another thread had a pre-existing write lock,
> therefore the second read lock blocks.
>
> This is fixed by removing the initial gsync read lock on the map
> but keeping the read lock held upon returning from vm_map_lookup().
>
> Co-Authored-By: Sergey Bugaev <buga...@gmail.com>
> ---
>  kern/gsync.c | 19 ++++++-------------
>  1 file changed, 6 insertions(+), 13 deletions(-)
>
> diff --git a/kern/gsync.c b/kern/gsync.c
> index 19190349..31b564ca 100644
> --- a/kern/gsync.c
> +++ b/kern/gsync.c
> @@ -134,11 +134,12 @@ probe_address (vm_map_t map, vm_offset_t addr,
>    vm_prot_t rprot;
>    boolean_t wired_p;
>
> -  if (vm_map_lookup (&map, addr, prot, FALSE, &ver,
> +  if (vm_map_lookup (&map, addr, prot, TRUE, &ver,
>        &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
>      return (-1);
>    else if ((rprot & prot) != prot)
>      {
> +      vm_map_unlock_read (map);
>        vm_object_unlock (vap->obj);
>        return (-1);
>      }
> @@ -227,18 +228,13 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
>    else if (addr % sizeof (int) != 0)
>      return (KERN_INVALID_ADDRESS);
>
> -  vm_map_lock_read (task->map);
> -
>    struct gsync_waiter w;
>    struct vm_args va;
>    boolean_t remote = task != current_task ();
>    int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
>
>    if (bucket < 0)
> -    {
> -      vm_map_unlock_read (task->map);
> -      return (KERN_INVALID_ADDRESS);
> -    }
> +    return (KERN_INVALID_ADDRESS);
>    else if (remote)
>      /* The VM object is returned locked. However, we are about to acquire
>       * a sleeping lock for a bucket, so we must not hold any simple
> @@ -354,17 +350,12 @@ kern_return_t gsync_wake (task_t task,
>    else if (addr % sizeof (int) != 0)
>      return (KERN_INVALID_ADDRESS);
>
> -  vm_map_lock_read (task->map);
> -
>    union gsync_key key;
>    struct vm_args va;
>    int bucket = gsync_prepare_key (task, addr, flags, &key, &va);
>
>    if (bucket < 0)
> -    {
> -      vm_map_unlock_read (task->map);
> -      return (KERN_INVALID_ADDRESS);
> -    }
> +    return (KERN_INVALID_ADDRESS);
>    else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
>      /* See above on why we do this. */
>      vm_object_reference_locked (va.obj);
> @@ -437,6 +428,7 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
>    int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
>    if (src_bkt < 0)
>      return (KERN_INVALID_ADDRESS);
> +  vm_map_unlock_read (task->map);
>
>    /* Unlock the VM object before the second lookup. */
>    vm_object_unlock (va.obj);
> @@ -444,6 +436,7 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
>    int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
>    if (dst_bkt < 0)
>      return (KERN_INVALID_ADDRESS);
> +  vm_map_unlock_read (task->map);
>
>    /* We never create any temporary mappings in 'requeue', so we
>     * can unlock the VM object right now. */
> --
> 2.43.0
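
For the archives, here is a rough user-space sketch of the lock ordering
problem the commit message describes, using a POSIX rwlock configured to
prefer writers as a stand-in for the kernel map lock. The file name and
all names in it are purely illustrative, this is not Mach code:

/* Hypothetical user-space analogue of the deadlock, not Mach code:
 * thread A read-locks the "map", a writer queues up, then A tries to
 * read-lock again (as the old gsync + vm_map_lookup() sequence did).
 * With a writer-preferring rwlock the second read lock waits for the
 * writer, and the writer waits for A's first read lock: deadlock.
 * Build with: cc -pthread rwlock-deadlock.c   (the program hangs).  */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t map_lock;

static void *
writer (void *arg)
{
  (void) arg;
  /* Runs while main() already holds a read lock, so this writer queues
   * up and makes any further read lockers wait behind it.  */
  pthread_rwlock_wrlock (&map_lock);
  pthread_rwlock_unlock (&map_lock);
  return NULL;
}

int
main (void)
{
  pthread_rwlockattr_t attr;
  pthread_t tid;

  pthread_rwlockattr_init (&attr);
  /* Writer preference is what makes the second read lock block.  */
  pthread_rwlockattr_setkind_np (&attr,
      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init (&map_lock, &attr);

  pthread_rwlock_rdlock (&map_lock);   /* like the old gsync read lock */
  pthread_create (&tid, NULL, writer, NULL);
  sleep (1);                           /* let the writer queue up */
  puts ("read-locking the map again...");
  pthread_rwlock_rdlock (&map_lock);   /* like vm_map_lookup(): never returns */
  puts ("not reached");
  return 0;
}

With the patch, gsync no longer takes that first read lock itself:
vm_map_lookup() acquires it and returns with it still held, so each path
only ever performs a single read acquisition of the map.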
-- 
Samuel
---
For an independent, transparent and rigorous evaluation!
I support the Commission d'Évaluation de l'Inria.