Oops, I screwed up the robust low-level lock implementation. Here's an updated patch with that fix.
I forgot to mention it in my previous message, but I left some global cthreads types unchanged, even though they could now be replaced by plain integers. I'm not sure whether anyone uses them directly, but since they were part of the exported interface, they remain as they were.
diff --git a/hurd/Makefile b/hurd/Makefile index 40bfdd9..b7e2ed4 100644 --- a/hurd/Makefile +++ b/hurd/Makefile @@ -60,6 +60,7 @@ routines = hurdstartup hurdinit \ ports-get ports-set hurdports hurdmsg \ errno-loc \ sysvshm \ + hurdlock \ $(sig) $(dtable) $(inlines) port-cleanup report-wait xattr sig = hurdsig hurdfault siginfo hurd-raise preempt-sig \ trampoline longjmp-ts catch-exc exc2signal hurdkill sigunwind \ diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c new file mode 100644 index 0000000..c611de7 --- /dev/null +++ b/hurd/hurdlock.c @@ -0,0 +1,247 @@ +/* Copyright (C) 1999-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include "hurdlock.h" +#include <hurd.h> +#include <time.h> +#include <errno.h> + +int lll_qwait (void *ptr, int lo, int hi, int flags) +{ + return (__gsync_wait (__mach_task_self (), + (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)); +} + +int lll_timed_wait (void *ptr, int val, int mlsec, int flags) +{ + return (__gsync_wait (__mach_task_self (), + (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)); +} + +int lll_timed_qwait (void *ptr, int lo, + int hi, int mlsec, int flags) +{ + return (__gsync_wait (__mach_task_self (), (vm_offset_t)ptr, + lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)); +} + +/* Convert an absolute timeout in nanoseconds to a relative + * timeout in milliseconds. */ +static inline int __attribute__ ((gnu_inline)) +compute_reltime (const struct timespec *abstime, clockid_t clk) +{ + struct timespec ts; + __clock_gettime (clk, &ts); + + ts.tv_sec = abstime->tv_sec - ts.tv_sec; + ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec; + + if (ts.tv_nsec < 0) + { + --ts.tv_sec; + ts.tv_nsec += 1000000000; + } + + return (ts.tv_sec < 0 ? -1 : + (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000)); +} + +int __lll_abstimed_wait (void *ptr, int val, + const struct timespec *tsp, int flags, int clk) +{ + int mlsec = compute_reltime (tsp, clk); + return (mlsec < 0 ? KERN_TIMEDOUT : + lll_timed_wait (ptr, val, mlsec, flags)); +} + +int __lll_abstimed_qwait (void *ptr, int lo, int hi, + const struct timespec *tsp, int flags, int clk) +{ + int mlsec = compute_reltime (tsp, clk); + return (mlsec < 0 ? 
KERN_TIMEDOUT : + lll_timed_qwait (ptr, lo, hi, mlsec, flags)); +} + +int __lll_abstimed_lock (void *ptr, + const struct timespec *tsp, int flags, int clk) +{ + if (lll_trylock (ptr) == 0) + return (0); + + while (1) + { + if (atomic_exchange_acq ((int *)ptr, 2) == 0) + return (0); + else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000) + return (EINVAL); + + int mlsec = compute_reltime (tsp, clk); + if (mlsec < 0 || lll_timed_wait (ptr, + 2, mlsec, flags) == KERN_TIMEDOUT) + return (ETIMEDOUT); + } +} + +void lll_set_wake (void *ptr, int val, int flags) +{ + __gsync_wake (__mach_task_self (), + (vm_offset_t)ptr, val, flags | GSYNC_MUTATE); +} + +void lll_requeue (void *src, void *dst, int wake_one, int flags) +{ + __gsync_requeue (__mach_task_self (), (vm_offset_t)src, + (vm_offset_t)dst, (boolean_t)wake_one, flags); +} + +/* Robust locks. */ + +extern int __getpid (void) __attribute__ ((const)); +extern task_t __pid2task (int); + +/* Test if a given process id is still valid. */ +static inline int valid_pid (int pid) +{ + task_t task = __pid2task (pid); + if (task == MACH_PORT_NULL) + return (0); + + __mach_port_deallocate (__mach_task_self (), task); + return (1); +} + +/* Robust locks have currently no support from the kernel; they + * are simply implemented with periodic polling. When sleeping, the + * maximum blocking time is determined by this constant. */ +#define MAX_WAIT_TIME 1500 + +int lll_robust_lock (void *ptr, int flags) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + int wait_time = 25; + unsigned int val; + + /* Try to set the lock word to our PID if it's clear. Otherwise, + * mark it as having waiters. 
*/ + while (1) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (atomic_compare_and_exchange_bool_acq (iptr, + val | LLL_WAITERS, val) == 0) + break; + } + + for (id |= LLL_WAITERS ; ; ) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (val && !valid_pid (val & LLL_OWNER_MASK)) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + } + else + { + lll_timed_wait (iptr, val, wait_time, flags); + if (wait_time < MAX_WAIT_TIME) + wait_time <<= 1; + } + } +} + +int __lll_robust_abstimed_lock (void *ptr, + const struct timespec *tsp, int flags, int clk) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + int wait_time = 25; + unsigned int val; + + while (1) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (atomic_compare_and_exchange_bool_acq (iptr, + val | LLL_WAITERS, val) == 0) + break; + } + + for (id |= LLL_WAITERS ; ; ) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (val && !valid_pid (val & LLL_OWNER_MASK)) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + } + else + { + int mlsec = compute_reltime (tsp, clk); + if (mlsec < 0) + return (ETIMEDOUT); + else if (mlsec > wait_time) + mlsec = wait_time; + + int res = lll_timed_wait (iptr, val, mlsec, flags); + if (res == KERN_TIMEDOUT) + return (ETIMEDOUT); + else if (wait_time < MAX_WAIT_TIME) + wait_time <<= 1; + } + } +} + +int lll_robust_trylock (void *ptr) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + unsigned int val = *iptr; + + if (!val) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + } + else if (!valid_pid (val & LLL_OWNER_MASK) && + atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + + return 
(EBUSY); +} + +void lll_robust_unlock (void *ptr, int flags) +{ + while (1) + { + unsigned int val = *(unsigned int *)ptr; + if (val & LLL_WAITERS) + { + lll_set_wake (ptr, 0, flags); + break; + } + else if (atomic_compare_and_exchange_bool_rel ((int *)ptr, 0, val) == 0) + break; + } +} + diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h new file mode 100644 index 0000000..3be00b0 --- /dev/null +++ b/hurd/hurdlock.h @@ -0,0 +1,117 @@ +/* Copyright (C) 1999-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _HURD_LOCK_H +#define _HURD_LOCK_H 1 + +#include <lowlevellock.h> + +struct timespec; + +/* Flags for robust locks. */ +#define LLL_WAITERS (1U << 31) +#define LLL_DEAD_OWNER (1U << 30) + +#define LLL_OWNER_MASK ~(LLL_WAITERS | LLL_DEAD_OWNER) + +/* Wait on 64-bit address PTR, without blocking if its contents + * are different from the pair <LO, HI>. */ +extern int lll_qwait (void *__ptr, int __lo, + int __hi, int __flags); + +/* Same as 'lll_wait', but only block for MLSEC milliseconds. */ +extern int lll_timed_wait (void *__ptr, int __val, + int __mlsec, int __flags); + +/* Same as 'lll_qwait', but only block for MLSEC milliseconds. 
*/ +extern int lll_timed_qwait (void *__ptr, int __lo, + int __hi, int __mlsec, int __flags); + +/* Same as 'lll_wait', but only block until TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_wait (void *__ptr, int __val, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_qwait', but only block until TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_qwait (void *__ptr, int __lo, int __hi, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_lock', but return with an error if TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_lock (void *__ptr, + const struct timespec *__tsp, int __flags, int __clk); + +/* Acquire the lock at PTR, but return with an error if + * the process containing the owner thread dies. */ +extern int lll_robust_lock (void *__ptr, int __flags); + +/* Same as 'lll_robust_lock', but only block until TSP + * elapses, using clock CLK. */ +extern int __lll_robust_abstimed_lock (void *__ptr, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_robust_lock', but return with an error + * if the lock cannot be acquired without blocking. */ +extern int lll_robust_trylock (void *__ptr); + +/* Wake one or more threads waiting on address PTR, + * setting its value to VAL before doing so. */ +extern void lll_set_wake (void *__ptr, int __val, int __flags); + +/* Release the robust lock at PTR. */ +extern void lll_robust_unlock (void *__ptr, int __flags); + +/* Rearrange threads waiting on address SRC to instead wait on + * DST, waking one of them if WAIT_ONE is non-zero. */ +extern void lll_requeue (void *__src, void *__dst, + int __wake_one, int __flags); + +/* The following are hacks that allow us to simulate optional + * parameters in C, to avoid having to pass the clock id for + * every one of these calls, defaulting to CLOCK_REALTIME if + * no argument is passed. */ + +#define lll_abstimed_wait(ptr, val, tsp, flags, ...) 
\ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_wait ((ptr), (val), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_abstimed_qwait(ptr, lo, hi, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_qwait ((ptr), (lo), (hi), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_abstimed_lock(ptr, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_lock ((ptr), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_robust_abstimed_lock(ptr, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_robust_abstimed_lock ((ptr), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#endif diff --git a/hurd/hurdpid.c b/hurd/hurdpid.c index 3fac897..859a774 100644 --- a/hurd/hurdpid.c +++ b/hurd/hurdpid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1991-2014 Free Software Foundation, Inc. +/* Copyright (C) 1991-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -16,6 +16,8 @@ <http://www.gnu.org/licenses/>. */ #include <hurd.h> +#include <lowlevellock.h> + pid_t _hurd_pid, _hurd_ppid, _hurd_pgrp; int _hurd_orphaned; @@ -66,6 +68,7 @@ _S_msg_proc_newids (mach_port_t me, /* Notify any waiting user threads that the id change as been completed. */ ++_hurd_pids_changed_stamp; + lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST); return 0; } diff --git a/mach/Makefile b/mach/Makefile index 5131e26..6d09687 100644 --- a/mach/Makefile +++ b/mach/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) 1991-2014 Free Software Foundation, Inc. +# Copyright (C) 1991-2016 Free Software Foundation, Inc. # This file is part of the GNU C Library. 
# The GNU C Library is free software; you can redistribute it and/or @@ -27,7 +27,7 @@ headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \ $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \ $(lock-headers) machine-sp.h lock = spin-solid spin-lock mutex-init mutex-solid -lock-headers = lock-intern.h machine-lock.h spin-lock.h +lock-headers = lock-intern.h machine-lock.h spin-lock.h lowlevellock.h routines = $(mach-syscalls) $(mach-shortcuts) \ mach_init mig_strncpy msg \ mig-alloc mig-dealloc mig-reply \ diff --git a/mach/lock-intern.h b/mach/lock-intern.h index 6d315bb..53330dc 100644 --- a/mach/lock-intern.h +++ b/mach/lock-intern.h @@ -1,4 +1,4 @@ -/* Copyright (C) 1994-2014 Free Software Foundation, Inc. +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,6 +20,7 @@ #include <sys/cdefs.h> #include <machine-lock.h> +#include <lowlevellock.h> #ifndef _EXTERN_INLINE #define _EXTERN_INLINE __extern_inline @@ -34,7 +35,7 @@ void __spin_lock_init (__spin_lock_t *__lock); _EXTERN_INLINE void __spin_lock_init (__spin_lock_t *__lock) { - *__lock = __SPIN_LOCK_INITIALIZER; + *__lock = LLL_INITIALIZER; } #endif @@ -50,21 +51,11 @@ void __spin_lock (__spin_lock_t *__lock); _EXTERN_INLINE void __spin_lock (__spin_lock_t *__lock) { - if (! __spin_try_lock (__lock)) - __spin_lock_solid (__lock); + lll_lock (__lock, 0); } #endif -/* Name space-clean internal interface to mutex locks. - - Code internal to the C library uses these functions to lock and unlock - mutex locks. These locks are of type `struct mutex', defined in - <cthreads.h>. The functions here are name space-clean. If the program - is linked with the cthreads library, `__mutex_lock_solid' and - `__mutex_unlock_solid' will invoke the corresponding cthreads functions - to implement real mutex locks. If not, simple stub versions just use - spin locks. 
*/ - +/* Name space-clean internal interface to mutex locks. */ /* Initialize the newly allocated mutex lock LOCK for further use. */ extern void __mutex_init (void *__lock); @@ -84,8 +75,7 @@ void __mutex_lock (void *__lock); _EXTERN_INLINE void __mutex_lock (void *__lock) { - if (! __spin_try_lock ((__spin_lock_t *) __lock)) - __mutex_lock_solid (__lock); + lll_lock (__lock, 0); } #endif @@ -97,8 +87,7 @@ void __mutex_unlock (void *__lock); _EXTERN_INLINE void __mutex_unlock (void *__lock) { - __spin_unlock ((__spin_lock_t *) __lock); - __mutex_unlock_solid (__lock); + lll_unlock (__lock, 0); } #endif @@ -109,7 +98,7 @@ int __mutex_trylock (void *__lock); _EXTERN_INLINE int __mutex_trylock (void *__lock) { - return __spin_try_lock ((__spin_lock_t *) __lock); + return (lll_trylock (__lock) == 0); } #endif diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h new file mode 100644 index 0000000..e60fe91 --- /dev/null +++ b/mach/lowlevellock.h @@ -0,0 +1,80 @@ +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef __MACH_LOWLEVELLOCK_H__ +#define __MACH_LOWLEVELLOCK_H__ 1 + +#include <mach/gnumach.h> +#include <atomic.h> + +/* Gsync flags. 
*/ +#ifndef GSYNC_SHARED + #define GSYNC_SHARED 0x01 + #define GSYNC_QUAD 0x02 + #define GSYNC_TIMED 0x04 + #define GSYNC_BROADCAST 0x08 + #define GSYNC_MUTATE 0x10 +#endif + +/* Static initializer for low-level locks. */ +#define LLL_INITIALIZER 0 + +/* Wait on address PTR, without blocking if its contents + * are different from VAL. */ +#define lll_wait(ptr, val, flags) \ + __gsync_wait (__mach_task_self (), \ + (vm_offset_t)(ptr), (val), 0, 0, (flags)) + +/* Wake one or more threads waiting on address PTR. */ +#define lll_wake(ptr, flags) \ + __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags)) + +/* Acquire the lock at PTR. */ +#define lll_lock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + int __flags = (flags); \ + if (*__iptr != 0 || \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0) \ + while (1) \ + { \ + if (atomic_exchange_acq (__iptr, 2) == 0) \ + break; \ + lll_wait (__iptr, 2, __flags); \ + } \ + (void)0; \ + }) + +/* Try to acquire the lock at PTR, without blocking. + * Evaluates to zero on success. */ +#define lll_trylock(ptr) \ + ({ \ + int *__iptr = (int *)(ptr); \ + *__iptr == 0 && \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1; \ + }) + +/* Release the lock at PTR. */ +#define lll_unlock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + if (atomic_exchange_rel (__iptr, 0) == 2) \ + lll_wake (__iptr, (flags)); \ + (void)0; \ + }) + +#endif diff --git a/mach/mutex-init.c b/mach/mutex-init.c index fc3a5e5..a2ede46 100644 --- a/mach/mutex-init.c +++ b/mach/mutex-init.c @@ -1,5 +1,5 @@ -/* Initialize a cthreads mutex structure. - Copyright (C) 1995-2014 Free Software Foundation, Inc. +/* Initialize a mutex. + Copyright (C) 1995-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -17,13 +17,10 @@ <http://www.gnu.org/licenses/>. 
*/ #include <lock-intern.h> -#include <cthreads.h> +#include <lowlevellock.h> void __mutex_init (void *lock) { - /* This happens to be name space-safe because it is a macro. - It invokes only spin_lock_init, which is a macro for __spin_lock_init; - and cthread_queue_init, which is a macro for some simple code. */ - mutex_init ((struct mutex *) lock); + *(int *)lock = LLL_INITIALIZER; } diff --git a/mach/mutex-solid.c b/mach/mutex-solid.c index 70e8333..11870d3 100644 --- a/mach/mutex-solid.c +++ b/mach/mutex-solid.c @@ -1,5 +1,5 @@ /* Stub versions of mutex_lock_solid/mutex_unlock_solid for no -lthreads. - Copyright (C) 1995-2014 Free Software Foundation, Inc. + Copyright (C) 1995-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -17,17 +17,12 @@ <http://www.gnu.org/licenses/>. */ #include <lock-intern.h> -#include <cthreads.h> - -/* If cthreads is linked in, it will define these functions itself to do - real cthreads mutex locks. This file will only be linked in when - cthreads is not used, and `mutexes' are in fact just spin locks (and - some unused storage). */ +#include <lowlevellock.h> void __mutex_lock_solid (void *lock) { - __spin_lock_solid (lock); + lll_lock (lock, 0); } void diff --git a/mach/spin-solid.c b/mach/spin-solid.c index e1e154b..067dd1e 100644 --- a/mach/spin-solid.c +++ b/mach/spin-solid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1994-2014 Free Software Foundation, Inc. +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -16,13 +16,11 @@ <http://www.gnu.org/licenses/>. */ #include <spin-lock.h> -#include <mach/mach_traps.h> +#include <lowlevellock.h> void __spin_lock_solid (spin_lock_t *lock) { - while (__spin_lock_locked (lock) || ! __spin_try_lock (lock)) - /* Yield to another thread (system call). 
*/ - __swtch_pri (0); + lll_lock (lock, 0); } weak_alias (__spin_lock_solid, spin_lock_solid); diff --git a/manual/errno.texi b/manual/errno.texi index 6a691fc..70c574a 100644 --- a/manual/errno.texi +++ b/manual/errno.texi @@ -993,6 +993,20 @@ the normal result is for the operations affected to complete with this error; @pxref{Cancel AIO Operations}. @end deftypevr +@comment errno.h +@comment POSIX.1: Robust mutex owner died +@deftypevr Macro int EOWNERDEAD +@comment errno 120 +The owner of a POSIX robust mutex has died. +@end deftypevr + +@comment errno.h +@comment POSIX.1: Robust mutex irrecoverable +@deftypevr Macro int ENOTRECOVERABLE +@comment errno 121 +An inconsistent POSIX robust mutex has been unlocked before marking it +as consistent again. +@end deftypevr @emph{The following error codes are defined by the Linux/i386 kernel. They are not yet documented.} diff --git a/sysdeps/mach/Makefile b/sysdeps/mach/Makefile index 634ba80..205cd33 100644 --- a/sysdeps/mach/Makefile +++ b/sysdeps/mach/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) 1993-2014 Free Software Foundation, Inc. +# Copyright (C) 1993-2016 Free Software Foundation, Inc. # This file is part of the GNU C Library. # The GNU C Library is free software; you can redistribute it and/or @@ -48,4 +48,17 @@ $(patsubst mach%,m\%h%,$(mach-before-compile)): # Run only if doesn't exist. 
before-compile += $(mach-before-compile) endif +ifeq (crypt,$(subdir)) + LDLIBS-crypt.so += -lmachuser +else ifeq (dlfcn,$(subdir)) + LDLIBS-dl.so += -lmachuser +else ifeq (nis,$(subdir)) + LDLIBS-nsl.so += -lmachuser + LDLIBS-nss_nis.so += -lmachuser + LDLIBS-nss_nisplus.so += -lmachuser + LDLIBS-nss_compat.so += -lmachuser +else ifeq (nss,$(subdir)) + LDLIBS-$(services:%=nss_%).so += -lmachuser +endif + endif # in-Makerules diff --git a/sysdeps/mach/bits/libc-lock.h b/sysdeps/mach/bits/libc-lock.h index 40b7f2b..89b0923 100644 --- a/sysdeps/mach/bits/libc-lock.h +++ b/sysdeps/mach/bits/libc-lock.h @@ -1,5 +1,5 @@ -/* libc-internal interface for mutex locks. Mach cthreads version. - Copyright (C) 1996-2014 Free Software Foundation, Inc. +/* libc-internal interface for mutex locks. Hurd version using gnumach gsync. + Copyright (C) 1996-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -19,16 +19,36 @@ #ifndef _BITS_LIBC_LOCK_H #define _BITS_LIBC_LOCK_H 1 -#ifdef _LIBC +#if (_LIBC - 0) || (_CTHREADS_ - 0) +#if (_LIBC - 0) +#include <tls.h> +#endif + #include <cthreads.h> -#define __libc_lock_t struct mutex +#include <lowlevellock.h> + +/* The locking here is very inexpensive, even for inlining. */ +#define _IO_lock_inexpensive 1 + +typedef unsigned int __libc_lock_t; +typedef struct +{ + __libc_lock_t lock; + void *owner; + int cnt; +} __libc_lock_recursive_t; + +typedef __libc_lock_recursive_t __rtld_lock_recursive_t; + +extern char __libc_lock_self0[0]; +#define __libc_lock_owner_self() \ + (__LIBC_NO_TLS() ? (void *)&__libc_lock_self0 : THREAD_SELF) + #else typedef struct __libc_lock_opaque__ __libc_lock_t; +typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t; #endif -/* Type for key of thread specific data. */ -typedef cthread_key_t __libc_key_t; - /* Define a lock variable NAME with storage class CLASS. 
The lock must be initialized with __libc_lock_init before it can be used (or define it with __libc_lock_define_initialized, below). Use `extern' for CLASS to @@ -41,25 +61,92 @@ typedef cthread_key_t __libc_key_t; /* Define an initialized lock variable NAME with storage class CLASS. */ #define __libc_lock_define_initialized(CLASS,NAME) \ - CLASS __libc_lock_t NAME = MUTEX_INITIALIZER; + CLASS __libc_lock_t NAME = LLL_INITIALIZER; /* Initialize the named lock variable, leaving it in a consistent, unlocked state. */ -#define __libc_lock_init(NAME) __mutex_init (&(NAME)) +#define __libc_lock_init(NAME) (NAME) = LLL_INITIALIZER /* Finalize the named lock variable, which must be locked. It cannot be used again until __libc_lock_init is called again on it. This must be called on a lock variable before the containing storage is reused. */ -#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME)) +#define __libc_lock_fini __libc_lock_unlock +#define __libc_lock_fini_recursive __libc_lock_unlock_recursive +#define __rtld_lock_fini_recursive __rtld_lock_unlock_recursive /* Lock the named lock variable. */ -#define __libc_lock_lock(NAME) __mutex_lock (&(NAME)) +#define __libc_lock_lock(NAME) \ + ({ lll_lock (&(NAME), 0); 0; }) /* Lock the named lock variable. */ -#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME))) +#define __libc_lock_trylock(NAME) lll_trylock (&(NAME)) /* Unlock the named lock variable. 
*/ -#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME)) +#define __libc_lock_unlock(NAME) \ + ({ lll_unlock (&(NAME), 0); 0; }) + +#define __libc_lock_define_recursive(CLASS,NAME) \ + CLASS __libc_lock_recursive_t NAME; + +#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_INITIALIZER, 0, 0 } + +#define __libc_lock_define_initialized_recursive(CLASS,NAME) \ + CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER; + +#define __rtld_lock_define_recursive(CLASS,NAME) \ + __libc_lock_define_recursive (CLASS, NAME) +#define _RTLD_LOCK_RECURSIVE_INITIALIZER \ + _LIBC_LOCK_RECURSIVE_INITIALIZER +#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \ + __libc_lock_define_initialized_recursive (CLASS, NAME) + +#define __libc_lock_init_recursive(NAME) \ + ((NAME) = (__libc_lock_recursive_t)_LIBC_LOCK_RECURSIVE_INITIALIZER, 0) + +#define __libc_lock_trylock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + void *__self = __libc_lock_owner_self (); \ + int __r = 0; \ + if (__self == __lock->owner) \ + ++__lock->cnt; \ + else if ((__r = lll_trylock (&__lock->lock)) == 0) \ + __lock->owner = __self, __lock->cnt = 1; \ + __r; \ + }) + +#define __libc_lock_lock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + void *__self = __libc_lock_owner_self (); \ + if (__self != __lock->owner) \ + { \ + lll_lock (&__lock->lock, 0); \ + __lock->owner = __self; \ + } \ + ++__lock->cnt; \ + (void)0; \ + }) + +#define __libc_lock_unlock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + if (--__lock->cnt == 0) \ + { \ + __lock->owner = 0; \ + lll_unlock (&__lock->lock, 0); \ + } \ + }) + + +#define __rtld_lock_initialize(NAME) \ + (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER) +#define __rtld_lock_trylock_recursive(NAME) \ + __libc_lock_trylock_recursive (NAME) +#define __rtld_lock_lock_recursive(NAME) \ + __libc_lock_lock_recursive(NAME) +#define 
__rtld_lock_unlock_recursive(NAME) \ + __libc_lock_unlock_recursive (NAME) /* XXX for now */ @@ -92,6 +179,10 @@ typedef cthread_key_t __libc_key_t; if ((DOIT) && __save_FCT != 0) \ (*__save_FCT)(__save_ARG); \ +#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg) +#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute) + +#if (_CTHREADS_ - 0) /* Use mutexes as once control variables. */ @@ -104,7 +195,6 @@ struct __libc_once #define __libc_once_define(CLASS,NAME) \ CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 } - /* Call handler iff the first call. */ #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \ do { \ @@ -121,25 +211,22 @@ struct __libc_once #ifdef _LIBC /* We need portable names for some functions. E.g., when they are used as argument to __libc_cleanup_region_start. */ -#define __libc_mutex_unlock __mutex_unlock +#define __libc_mutex_unlock __libc_lock_unlock #endif +/* Type for key of thread specific data. */ +typedef cthread_key_t __libc_key_t; + #define __libc_key_create(KEY,DEST) cthread_keycreate (KEY) #define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL) void *__libc_getspecific (__libc_key_t key); -/* XXX until cthreads supports recursive locks */ -#define __libc_lock_define_initialized_recursive __libc_lock_define_initialized -#define __libc_lock_init_recursive __libc_lock_init -#define __libc_lock_fini_recursive __libc_lock_fini -#define __libc_lock_trylock_recursive __libc_lock_trylock -#define __libc_lock_unlock_recursive __libc_lock_unlock -#define __libc_lock_lock_recursive __libc_lock_lock - -#define __rtld_lock_define_initialized_recursive __libc_lock_define_initialized -#define __rtld_lock_fini_recursive __libc_lock_fini -#define __rtld_lock_trylock_recursive __libc_lock_trylock -#define __rtld_lock_unlock_recursive __libc_lock_unlock -#define __rtld_lock_lock_recursive __libc_lock_lock +#endif /* _CTHREADS_ */ + +/* Hide the definitions which are only supposed to be used 
inside libc in + a separate file. This file is not present in the installation! */ +#ifdef _LIBC +# include <bits/libc-lockP.h> +#endif #endif /* bits/libc-lock.h */ diff --git a/sysdeps/mach/hurd/bits/errno.h b/sysdeps/mach/hurd/bits/errno.h index d20ffe6..c5db66e 100644 --- a/sysdeps/mach/hurd/bits/errno.h +++ b/sysdeps/mach/hurd/bits/errno.h @@ -222,6 +222,10 @@ enum __error_t_codes #define ETIME _HURD_ERRNO (117)/* Timer expired */ ECANCELED = _HURD_ERRNO (119), #define ECANCELED _HURD_ERRNO (119)/* Operation canceled */ + EOWNERDEAD = _HURD_ERRNO (120), +#define EOWNERDEAD _HURD_ERRNO (120)/* Robust mutex owner died */ + ENOTRECOVERABLE = _HURD_ERRNO (121), +#define ENOTRECOVERABLE _HURD_ERRNO (121)/* Robust mutex irrecoverable */ /* Errors from <mach/message.h>. */ EMACH_SEND_IN_PROGRESS = 0x10000001, @@ -278,6 +282,8 @@ enum __error_t_codes EKERN_MEMORY_PRESENT = 23, EKERN_WRITE_PROTECTION_FAILURE = 24, EKERN_TERMINATED = 26, + EKERN_TIMEDOUT = 27, + EKERN_INTERRUPTED = 28, /* Errors from <mach/mig_errors.h>. */ EMIG_TYPE_ERROR = -300 /* client type check failure */, @@ -305,7 +311,7 @@ enum __error_t_codes }; -#define _HURD_ERRNOS 120 +#define _HURD_ERRNOS 122 /* User-visible type of error codes. It is ok to use `int' or `kern_return_t' for these, but with `error_t' the debugger prints diff --git a/sysdeps/mach/hurd/bits/libc-lock.h b/sysdeps/mach/hurd/bits/libc-lock.h deleted file mode 100644 index c9872c6..0000000 --- a/sysdeps/mach/hurd/bits/libc-lock.h +++ /dev/null @@ -1,215 +0,0 @@ -/* libc-internal interface for mutex locks. Hurd version using Mach cthreads. - Copyright (C) 1996-2014 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
- - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - <http://www.gnu.org/licenses/>. */ - -#ifndef _BITS_LIBC_LOCK_H -#define _BITS_LIBC_LOCK_H 1 - -#if (_LIBC - 0) || (_CTHREADS_ - 0) -# if (_LIBC - 0) -# include <tls.h> -# endif -#include <cthreads.h> - -typedef struct mutex __libc_lock_t; -typedef struct -{ - struct mutex mutex; - void *owner; - int count; -} __libc_lock_recursive_t; -typedef __libc_lock_recursive_t __rtld_lock_recursive_t; - -extern char __libc_lock_self0[0]; -#define __libc_lock_owner_self() (__LIBC_NO_TLS() ? &__libc_lock_self0 : THREAD_SELF) - -#else -typedef struct __libc_lock_opaque__ __libc_lock_t; -typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t; -#endif - -/* Define a lock variable NAME with storage class CLASS. The lock must be - initialized with __libc_lock_init before it can be used (or define it - with __libc_lock_define_initialized, below). Use `extern' for CLASS to - declare a lock defined in another module. In public structure - definitions you must use a pointer to the lock structure (i.e., NAME - begins with a `*'), because its storage size will not be known outside - of libc. */ -#define __libc_lock_define(CLASS,NAME) \ - CLASS __libc_lock_t NAME; - -/* Define an initialized lock variable NAME with storage class CLASS. */ -#define __libc_lock_define_initialized(CLASS,NAME) \ - CLASS __libc_lock_t NAME = MUTEX_INITIALIZER; - -/* Initialize the named lock variable, leaving it in a consistent, unlocked - state. */ -#define __libc_lock_init(NAME) __mutex_init (&(NAME)) - -/* Finalize the named lock variable, which must be locked. 
It cannot be - used again until __libc_lock_init is called again on it. This must be - called on a lock variable before the containing storage is reused. */ -#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME)) -#define __libc_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex) -#define __rtld_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex) - - -/* Lock the named lock variable. */ -#define __libc_lock_lock(NAME) __mutex_lock (&(NAME)) - -/* Lock the named lock variable. */ -#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME))) - -/* Unlock the named lock variable. */ -#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME)) - - -#define __libc_lock_define_recursive(CLASS,NAME) \ - CLASS __libc_lock_recursive_t NAME; -#define _LIBC_LOCK_RECURSIVE_INITIALIZER { MUTEX_INITIALIZER, 0, 0 } -#define __libc_lock_define_initialized_recursive(CLASS,NAME) \ - CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER; - -#define __rtld_lock_define_recursive(CLASS,NAME) \ - __libc_lock_define_recursive (CLASS, NAME) -#define _RTLD_LOCK_RECURSIVE_INITIALIZER \ - _LIBC_LOCK_RECURSIVE_INITIALIZER -#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \ - __libc_lock_define_initialized_recursive (CLASS, NAME) - -#define __libc_lock_init_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - __lock->owner = 0; mutex_init (&__lock->mutex); }) - -#define __libc_lock_trylock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - void *__self = __libc_lock_owner_self (); \ - __mutex_trylock (&__lock->mutex) \ - ? (__lock->owner = __self, __lock->count = 1, 0) \ - : __lock->owner == __self ? 
(++__lock->count, 0) : 1; }) - -#define __libc_lock_lock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - void *__self = __libc_lock_owner_self (); \ - if (__mutex_trylock (&__lock->mutex) \ - || (__lock->owner != __self \ - && (__mutex_lock (&__lock->mutex), 1))) \ - __lock->owner = __self, __lock->count = 1; \ - else \ - ++__lock->count; \ - }) -#define __libc_lock_unlock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - if (--__lock->count == 0) \ - { \ - __lock->owner = 0; \ - __mutex_unlock (&__lock->mutex); \ - } \ - }) - - -#define __rtld_lock_initialize(NAME) \ - (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER) -#define __rtld_lock_trylock_recursive(NAME) \ - __libc_lock_trylock_recursive (NAME) -#define __rtld_lock_lock_recursive(NAME) \ - __libc_lock_lock_recursive(NAME) -#define __rtld_lock_unlock_recursive(NAME) \ - __libc_lock_unlock_recursive (NAME) - - -/* XXX for now */ -#define __libc_rwlock_define __libc_lock_define -#define __libc_rwlock_define_initialized __libc_lock_define_initialized -#define __libc_rwlock_init __libc_lock_init -#define __libc_rwlock_fini __libc_lock_fini -#define __libc_rwlock_rdlock __libc_lock_lock -#define __libc_rwlock_wrlock __libc_lock_lock -#define __libc_rwlock_tryrdlock __libc_lock_trylock -#define __libc_rwlock_trywrlock __libc_lock_trylock -#define __libc_rwlock_unlock __libc_lock_unlock - - -/* Start a critical region with a cleanup function */ -#define __libc_cleanup_region_start(DOIT, FCT, ARG) \ -{ \ - typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0; \ - typeof (ARG) __save_ARG = ARG; \ - /* close brace is in __libc_cleanup_region_end below. */ - -/* End a critical region started with __libc_cleanup_region_start. */ -#define __libc_cleanup_region_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ -} - -/* Sometimes we have to exit the block in the middle. 
*/ -#define __libc_cleanup_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ - -#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg) -#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute) - -#if (_CTHREADS_ - 0) - -/* Use mutexes as once control variables. */ - -struct __libc_once - { - __libc_lock_t lock; - int done; - }; - -#define __libc_once_define(CLASS,NAME) \ - CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 } - -/* Call handler iff the first call. */ -#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \ - do { \ - __libc_lock_lock (ONCE_CONTROL.lock); \ - if (!ONCE_CONTROL.done) \ - (INIT_FUNCTION) (); \ - ONCE_CONTROL.done = 1; \ - __libc_lock_unlock (ONCE_CONTROL.lock); \ - } while (0) - -/* Get once control variable. */ -#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL).done != 0) - -#ifdef _LIBC -/* We need portable names for some functions. E.g., when they are - used as argument to __libc_cleanup_region_start. */ -#define __libc_mutex_unlock __mutex_unlock -#endif - -/* Type for key of thread specific data. */ -typedef cthread_key_t __libc_key_t; - -#define __libc_key_create(KEY,DEST) cthread_keycreate (KEY) -#define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL) -void *__libc_getspecific (__libc_key_t key); - -#endif /* _CTHREADS_ */ - -/* Hide the definitions which are only supposed to be used inside libc in - a separate file. This file is not present in the installation! */ -#ifdef _LIBC -# include <bits/libc-lockP.h> -#endif - -#endif /* bits/libc-lock.h */ diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c index 6662d3d..655bc3c 100644 --- a/sysdeps/mach/hurd/setpgid.c +++ b/sysdeps/mach/hurd/setpgid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1993-2014 Free Software Foundation, Inc. +/* Copyright (C) 1993-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or @@ -19,6 +19,7 @@ #include <unistd.h> #include <hurd.h> #include <hurd/port.h> +#include <lowlevellock.h> /* Set the process group ID of the process matching PID to PGID. If PID is zero, the current process's process group ID is set. @@ -40,14 +41,7 @@ __setpgid (pid, pgid) /* Synchronize with the signal thread to make sure we have received and processed proc_newids before returning to the user. */ while (_hurd_pids_changed_stamp == stamp) - { -#ifdef noteven - /* XXX we have no need for a mutex, but cthreads demands one. */ - __condition_wait (&_hurd_pids_changed_sync, NULL); -#else - __swtch_pri(0); -#endif - } + lll_wait (&_hurd_pids_changed_stamp, stamp, 0); return 0; diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c index 36ec3d8..cf6140c 100644 --- a/sysdeps/mach/hurd/setsid.c +++ b/sysdeps/mach/hurd/setsid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1993-2014 Free Software Foundation, Inc. +/* Copyright (C) 1993-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,6 +20,8 @@ #include <hurd.h> #include <hurd/port.h> #include <hurd/fd.h> +#include <hurd/ioctl.h> +#include <lowlevellock.h> /* Create a new session with the calling process as its leader. The process group IDs of the session and the calling process @@ -54,14 +56,7 @@ __setsid (void) returned by `getpgrp ()' in other threads) has been updated before we return. */ while (_hurd_pids_changed_stamp == stamp) - { -#ifdef noteven - /* XXX we have no need for a mutex, but cthreads demands one. */ - __condition_wait (&_hurd_pids_changed_sync, NULL); -#else - __swtch_pri (0); -#endif - } + lll_wait (&_hurd_pids_changed_stamp, stamp, 0); } HURD_CRITICAL_END;