* Makefile: Removed objects and added pt-mutex. * include/pthread/pthread.h: Added definitions for POSIX function 'pthread_mutex_consistent' and added GNU extension with the '_np' suffix. * sysdeps/generic/pt-mutex.c: New file. Contains the implementation for POSIX mutexes and their attributes, plus GNU/Hurd extensions. * sysdeps/pthread/bits/mutex.h: Redefined the mutex type. --- diff --git a/Makefile b/Makefile index bfdae7b..8f3144b 100644 --- a/Makefile +++ b/Makefile @@ -81,18 +81,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \ pt-testcancel \ pt-cancel \ \ - pt-mutexattr \ - pt-mutexattr-destroy pt-mutexattr-init \ - pt-mutexattr-getprioceiling pt-mutexattr-getprotocol \ - pt-mutexattr-getpshared pt-mutexattr-gettype \ - pt-mutexattr-setprioceiling pt-mutexattr-setprotocol \ - pt-mutexattr-setpshared pt-mutexattr-settype \ - \ - pt-mutex-init pt-mutex-destroy \ - pt-mutex-lock pt-mutex-trylock pt-mutex-timedlock \ - pt-mutex-unlock \ - pt-mutex-transfer-np \ - pt-mutex-getprioceiling pt-mutex-setprioceiling \ + pt-mutex \ \ pt-rwlock-attr \ pt-rwlockattr-init pt-rwlockattr-destroy \ diff --git a/include/pthread/pthread.h b/include/pthread/pthread.h index 3aa6a93..d73a3e7 100644 --- a/include/pthread/pthread.h +++ b/include/pthread/pthread.h @@ -399,6 +399,18 @@ extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex, __THROW __nonnull ((1, 3)); #endif +#ifdef __USE_XOPEN2K8 + +/* Declare the state protected by robust mutex MTXP as consistent. */ +extern int pthread_mutex_consistent (pthread_mutex_t *__mtxp) + __THROW __nonnull ((1)); + +# ifdef __USE_GNU +extern int pthread_mutex_consistent_np (pthread_mutex_t *__mtxp) + __THROW __nonnull ((1)); +# endif +#endif + /* Condition attributes. 
*/ diff --git a/sysdeps/generic/pt-mutex.c b/sysdeps/generic/pt-mutex.c new file mode 100644 index 0000000..c0eacca --- /dev/null +++ b/sysdeps/generic/pt-mutex.c @@ -0,0 +1,536 @@ +/* Copyright (C) 2016 Free Software Foundation, Inc. + Contributed by Agustina Arzille , 2016. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either + version 3 of the license, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; if not, see + . +*/ + +#include +#include +#include +#include +#include + +static const pthread_mutexattr_t dfl_attr = +{ + .__prioceiling = 0, + .__protocol = PTHREAD_PRIO_NONE, + .__pshared = PTHREAD_PROCESS_PRIVATE, + .__mutex_type = __PTHREAD_MUTEX_TIMED +}; + +int pthread_mutexattr_init (pthread_mutexattr_t *attrp) +{ + *attrp = dfl_attr; + return (0); +} + +int pthread_mutexattr_destroy (pthread_mutexattr_t *attrp) +{ + (void)attrp; + return (0); +} + +int pthread_mutexattr_settype (pthread_mutexattr_t *attrp, int type) +{ + if (type < 0 || type > __PTHREAD_MUTEX_RECURSIVE) + return (EINVAL); + + attrp->__mutex_type = type; + return (0); +} + +int pthread_mutexattr_gettype (const pthread_mutexattr_t *attrp, int *outp) +{ + *outp = attrp->__mutex_type; + return (0); +} + +int pthread_mutexattr_setpshared (pthread_mutexattr_t *attrp, int pshared) +{ + if (pshared != PTHREAD_PROCESS_PRIVATE && + pshared != PTHREAD_PROCESS_SHARED) + return (EINVAL); + + attrp->__pshared = pshared; + return (0); +} + +int pthread_mutexattr_getpshared (const pthread_mutexattr_t *attrp, int *outp) +{ + *outp = attrp->__pshared; + 
return (0); +} + +int pthread_mutexattr_setrobust (pthread_mutexattr_t *attrp, int robust) +{ + if (robust != PTHREAD_MUTEX_ROBUST && + robust != PTHREAD_MUTEX_STALLED) + return (EINVAL); + + attrp->__prioceiling |= robust; + return (0); +} + +weak_alias (pthread_mutexattr_setrobust, pthread_mutexattr_setrobust_np) + +int pthread_mutexattr_getrobust (const pthread_mutexattr_t *attrp, int *outp) +{ + *outp = (attrp->__prioceiling & PTHREAD_MUTEX_ROBUST) ? + PTHREAD_MUTEX_ROBUST : PTHREAD_MUTEX_STALLED; + return (0); +} + +weak_alias (pthread_mutexattr_getrobust, pthread_mutexattr_getrobust_np) + +int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *attrp, int cl) +{ + (void)attrp; (void)cl; + return (ENOSYS); +} + +stub_warning (pthread_mutexattr_setprioceiling) + +int pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *ap, int *clp) +{ + (void)ap; (void)clp; + return (ENOSYS); +} + +stub_warning (pthread_mutexattr_getprioceiling) + +int pthread_mutexattr_setprotocol (pthread_mutexattr_t *attrp, int proto) +{ + (void)attrp; + return (proto == PTHREAD_PRIO_NONE ? 0 : + proto != PTHREAD_PRIO_INHERIT && + proto != PTHREAD_PRIO_PROTECT ? EINVAL : ENOTSUP); +} + +int pthread_mutexattr_getprotocol (const pthread_mutexattr_t *attrp, int *ptp) +{ + *ptp = attrp->__protocol; + return (0); +} + +int _pthread_mutex_init (pthread_mutex_t *mtxp, + const pthread_mutexattr_t *attrp) +{ + if (attrp == NULL) + attrp = &dfl_attr; + + mtxp->__flags = (attrp->__pshared == PTHREAD_PROCESS_SHARED ? + GSYNC_SHARED : 0) | ((attrp->__prioceiling & PTHREAD_MUTEX_ROBUST) ? + PTHREAD_MUTEX_ROBUST : 0); + + mtxp->__type = attrp->__mutex_type + + (attrp->__mutex_type != __PTHREAD_MUTEX_TIMED); + + mtxp->__owner_id = 0; + mtxp->__shpid = 0; + mtxp->__cnt = 0; + mtxp->__lock = 0; + + return (0); +} + +strong_alias (_pthread_mutex_init, pthread_mutex_init) + +/* Lock routines. */ + +/* Special ID used to signal an unrecoverable robust mutex. 
*/ +#define NOTRECOVERABLE_ID (1U << 31) + +/* Common path for robust mutexes. Assumes the variable 'ret' + * is bound in the function this is called from. */ +#define ROBUST_LOCK(self, mtxp, cb, ...) \ + if (mtxp->__owner_id == NOTRECOVERABLE_ID) \ + return (ENOTRECOVERABLE); \ + else if (mtxp->__owner_id == self->thread && \ + __getpid () == (int)(mtxp->__lock & LLL_OWNER_MASK)) \ + { \ + if (mtxp->__type == PT_MTX_RECURSIVE) \ + { \ + if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) \ + return (EAGAIN); \ + \ + ++mtxp->__cnt; \ + return (0); \ + } \ + else if (mtxp->__type == PT_MTX_ERRORCHECK) \ + return (EDEADLK); \ + } \ + \ + ret = cb (&mtxp->__lock, ##__VA_ARGS__); \ + if (ret == 0 || ret == EOWNERDEAD) \ + { \ + if (mtxp->__owner_id == NOTRECOVERABLE_ID) \ + ret = ENOTRECOVERABLE; \ + else \ + { \ + mtxp->__owner_id = self->thread; \ + mtxp->__cnt = 1; \ + if (ret == EOWNERDEAD) \ + { \ + mtxp->__lock = mtxp->__lock | LLL_DEAD_OWNER; \ + atomic_write_barrier (); \ + } \ + } \ + } \ + (void)0 + +/* Check that a thread owns the mutex. For non-robust, task-shared + * objects, we have to check the thread *and* process-id. */ +#define mtx_owned_p(mtx, pt, flags) \ + ((mtx)->__owner_id == (pt)->thread && \ + (((flags) & GSYNC_SHARED) == 0 || \ + (mtx)->__shpid == __getpid ())) + +/* Record a thread as the owner of the mutex. */ +#define mtx_set_owner(mtx, pt, flags) \ + (void) \ + ({ \ + (mtx)->__owner_id = (pt)->thread; \ + if ((flags) & GSYNC_SHARED) \ + (mtx)->__shpid = __getpid (); \ + }) + +/* Redefined mutex types. The +1 is for binary compatibility. */ +#define PT_MTX_NORMAL __PTHREAD_MUTEX_TIMED +#define PT_MTX_RECURSIVE (__PTHREAD_MUTEX_RECURSIVE + 1) +#define PT_MTX_ERRORCHECK (__PTHREAD_MUTEX_ERRORCHECK + 1) + +/* Mutex type, including robustness. 
*/ +#define MTX_TYPE(mtxp) \ + ((mtxp)->__type | ((mtxp)->__flags & PTHREAD_MUTEX_ROBUST)) + +extern int __getpid (void) __attribute__ ((const)); + +int __pthread_mutex_lock (pthread_mutex_t *mtxp) +{ + struct __pthread *self = _pthread_self (); + int flags = mtxp->__flags & GSYNC_SHARED; + int ret = 0; + + switch (MTX_TYPE (mtxp)) + { + case PT_MTX_NORMAL: + lll_lock (&mtxp->__lock, flags); + break; + + case PT_MTX_RECURSIVE: + if (mtx_owned_p (mtxp, self, flags)) + { + if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) + return (EAGAIN); + + ++mtxp->__cnt; + return (ret); + } + + lll_lock (&mtxp->__lock, flags); + mtx_set_owner (mtxp, self, flags); + mtxp->__cnt = 1; + break; + + case PT_MTX_ERRORCHECK: + if (mtx_owned_p (mtxp, self, flags)) + return (EDEADLK); + + lll_lock (&mtxp->__lock, flags); + mtx_set_owner (mtxp, self, flags); + break; + + case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: + case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: + case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: + ROBUST_LOCK (self, mtxp, lll_robust_lock, flags); + break; + + default: + ret = EINVAL; + break; + } + + return (ret); +} + +strong_alias (__pthread_mutex_lock, _pthread_mutex_lock) +strong_alias (__pthread_mutex_lock, pthread_mutex_lock) + +int __pthread_mutex_trylock (pthread_mutex_t *mtxp) +{ + struct __pthread *self = _pthread_self (); + int ret; + + switch (MTX_TYPE (mtxp)) + { + case PT_MTX_NORMAL: + ret = lll_trylock (&mtxp->__lock); + break; + + case PT_MTX_RECURSIVE: + if (mtx_owned_p (mtxp, self, mtxp->__flags)) + { + if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) + return (EAGAIN); + + ++mtxp->__cnt; + ret = 0; + } + else if ((ret = lll_trylock (&mtxp->__lock)) == 0) + { + mtx_set_owner (mtxp, self, mtxp->__flags); + mtxp->__cnt = 1; + } + + break; + + case PT_MTX_ERRORCHECK: + if ((ret = lll_trylock (&mtxp->__lock)) == 0) + mtx_set_owner (mtxp, self, mtxp->__flags); + break; + + case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: + case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: + case 
PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: + ROBUST_LOCK (self, mtxp, lll_robust_trylock); + break; + + default: + ret = EINVAL; + break; + } + + return (ret); +} + +strong_alias (__pthread_mutex_trylock, _pthread_mutex_trylock) +strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock) + +int pthread_mutex_timedlock (pthread_mutex_t *mtxp, + const struct timespec *tsp) +{ + struct __pthread *self = _pthread_self (); + int ret, flags = mtxp->__flags & GSYNC_SHARED; + + switch (MTX_TYPE (mtxp)) + { + case PT_MTX_NORMAL: + ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags); + break; + + case PT_MTX_RECURSIVE: + if (mtx_owned_p (mtxp, self, flags)) + { + if (__glibc_unlikely (mtxp->__cnt + 1 == 0)) + return (EAGAIN); + + ++mtxp->__cnt; + ret = 0; + } + else if ((ret = lll_abstimed_lock (&mtxp->__lock, + tsp, flags)) == 0) + { + mtx_set_owner (mtxp, self, flags); + mtxp->__cnt = 1; + } + + break; + + case PT_MTX_ERRORCHECK: + if (mtx_owned_p (mtxp, self, flags)) + ret = EDEADLK; + else if ((ret = lll_abstimed_lock (&mtxp->__lock, + tsp, flags)) == 0) + mtx_set_owner (mtxp, self, flags); + + break; + + case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: + case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: + case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: + ROBUST_LOCK (self, mtxp, lll_robust_abstimed_lock, tsp, flags); + break; + + default: + ret = EINVAL; + break; + } + + return (ret); +} + +int __pthread_mutex_unlock (pthread_mutex_t *mtxp) +{ + struct __pthread *self = _pthread_self (); + int ret = 0, flags = mtxp->__flags & GSYNC_SHARED; + + switch (MTX_TYPE (mtxp)) + { + case PT_MTX_NORMAL: + lll_unlock (&mtxp->__lock, flags); + break; + + case PT_MTX_RECURSIVE: + if (!mtx_owned_p (mtxp, self, flags)) + ret = EPERM; + else if (--mtxp->__cnt == 0) + { + mtxp->__owner_id = mtxp->__shpid = 0; + lll_unlock (&mtxp->__lock, flags); + } + + break; + + case PT_MTX_ERRORCHECK: + if (!mtx_owned_p (mtxp, self, flags)) + ret = EPERM; + else + { + mtxp->__owner_id = mtxp->__shpid = 0; + 
lll_unlock (&mtxp->__lock, flags); + } + + break; + + case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: + case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: + case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: + if (mtxp->__owner_id == NOTRECOVERABLE_ID) + ; /* Nothing to do. */ + else if (mtxp->__owner_id != self->thread || + (int)(mtxp->__lock & LLL_OWNER_MASK) != __getpid ()) + ret = EPERM; + else if (--mtxp->__cnt == 0) + { + /* Release the lock. If it's in an inconsistent + * state, mark it as irrecoverable. */ + mtxp->__owner_id = (mtxp->__lock & LLL_DEAD_OWNER) ? + NOTRECOVERABLE_ID : 0; + lll_robust_unlock (&mtxp->__lock, flags); + } + + break; + + default: + ret = EINVAL; + break; + } + + return (ret); +} + +strong_alias (__pthread_mutex_unlock, _pthread_mutex_unlock) +strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock) + +int pthread_mutex_consistent (pthread_mutex_t *mtxp) +{ + int ret = EINVAL; + unsigned int val = mtxp->__lock; + + if ((mtxp->__flags & PTHREAD_MUTEX_ROBUST) != 0 && + (val & LLL_DEAD_OWNER) != 0 && + atomic_compare_and_exchange_bool_acq (&mtxp->__lock, + __getpid () | LLL_WAITERS, val) == 0) + { + /* The mutex is now ours, and it's consistent. 
*/ + mtxp->__owner_id = _pthread_self()->thread; + mtxp->__cnt = 1; + ret = 0; + } + + return (ret); +} + +weak_alias (pthread_mutex_consistent, pthread_mutex_consistent_np) + +int __pthread_mutex_transfer_np (pthread_mutex_t *mtxp, pthread_t th) +{ + struct __pthread *self = _pthread_self (); + struct __pthread *pt = __pthread_getid (th); + + if (!pt) + return (ESRCH); + else if (pt == self) + return (0); + + int ret = 0; + int flags = mtxp->__flags & GSYNC_SHARED; + + switch (MTX_TYPE (mtxp)) + { + case PT_MTX_NORMAL: + break; + + case PT_MTX_RECURSIVE: + case PT_MTX_ERRORCHECK: + if (!mtx_owned_p (mtxp, self, flags)) + ret = EPERM; + else + mtx_set_owner (mtxp, pt, flags); + + break; + + case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST: + case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST: + case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST: + /* Note that this can be used to transfer an inconsistent + * mutex as well. The new owner will still have the same + * flags as the original. */ + if (mtxp->__owner_id != self->thread || + (int)(mtxp->__lock & LLL_OWNER_MASK) != __getpid ()) + ret = EPERM; + else + mtxp->__owner_id = pt->thread; + + break; + + default: + ret = EINVAL; + } + + return (ret); +} + +strong_alias (__pthread_mutex_transfer_np, pthread_mutex_transfer_np) + +int pthread_mutex_setprioceiling (pthread_mutex_t *mtxp, int cl, int *prp) +{ + (void)mtxp; (void)cl; (void)prp; + return (ENOSYS); +} + +stub_warning (pthread_mutex_setprioceiling) + +int pthread_mutex_getprioceiling (const pthread_mutex_t *mtxp, int *clp) +{ + (void)mtxp; (void)clp; + return (ENOSYS); +} + +stub_warning (pthread_mutex_getprioceiling) + +int _pthread_mutex_destroy (pthread_mutex_t *mtxp) +{ + atomic_read_barrier (); + if (*(volatile unsigned int *)&mtxp->__lock != 0) + return (EBUSY); + + mtxp->__type = -1; + return (0); +} + +strong_alias (_pthread_mutex_destroy, pthread_mutex_destroy) + diff --git a/sysdeps/pthread/bits/mutex.h b/sysdeps/pthread/bits/mutex.h index 3120237..a52a2ad 100644 
--- a/sysdeps/pthread/bits/mutex.h +++ b/sysdeps/pthread/bits/mutex.h @@ -1,6 +1,6 @@ /* Mutex type. Generic version. - Copyright (C) 2000, 2002, 2005, 2006, 2007, 2008, 2009 + Copyright (C) 2000-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. @@ -31,43 +31,32 @@ # undef __need_pthread_mutex # define __pthread_mutex_defined -# include # include /* User visible part of a mutex. */ struct __pthread_mutex - { - __pthread_spinlock_t __held; - __pthread_spinlock_t __lock; - /* In cthreads, mutex_init does not initialized thre third - pointer, as such, we cannot rely on its value for anything. */ - char *__cthreadscompat1; - struct __pthread *__queue; - struct __pthread_mutexattr *__attr; - void *__data; - /* Up to this point, we are completely compatible with cthreads - and what libc expects. */ - void *__owner; - unsigned __locks; - /* If NULL then the default attributes apply. */ - }; - -/* Initializer for a mutex. N.B. this also happens to be compatible - with the cthread mutex initializer. */ -# define __PTHREAD_MUTEX_INITIALIZER \ - { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, 0, 0, 0, 0 } - -# define __PTHREAD_ERRORCHECK_MUTEXATTR ((struct __pthread_mutexattr *) ((unsigned long) __PTHREAD_MUTEX_ERRORCHECK + 1)) - -# define __PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \ - { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, \ - __PTHREAD_ERRORCHECK_MUTEXATTR, 0, 0, 0 } - -# define __PTHREAD_RECURSIVE_MUTEXATTR ((struct __pthread_mutexattr *) ((unsigned long) __PTHREAD_MUTEX_RECURSIVE + 1)) - -# define __PTHREAD_RECURSIVE_MUTEX_INITIALIZER \ - { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, \ - __PTHREAD_RECURSIVE_MUTEXATTR, 0, 0, 0 } +{ + unsigned int __lock; + unsigned int __owner_id; + unsigned int __cnt; + int __shpid; + int __type; + int __flags; + unsigned int __reserved1; + unsigned int __reserved2; +}; + +/* Static mutex initializers. 
*/ +#define __PTHREAD_MUTEX_INITIALIZER \ + { 0, 0, 0, 0, __PTHREAD_MUTEX_TIMED, 0, 0, 0 } + +/* The +1 is to maintain binary compatibility with the old + * libpthread implementation. */ +#define __PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \ + { 0, 0, 0, 0, __PTHREAD_MUTEX_ERRORCHECK + 1, 0, 0, 0 } + +#define __PTHREAD_RECURSIVE_MUTEX_INITIALIZER \ + { 0, 0, 0, 0, __PTHREAD_MUTEX_RECURSIVE + 1, 0, 0, 0 } # endif #endif /* Not __pthread_mutex_defined. */