* Makefile: Removed the individual rwlock objects and added pt-rwlock.
  Also added a compilation flag specific to pt-rwlock.
* sysdeps/generic/pt-rwlock.c: New file. Contains the implementation
  of POSIX read-write locks and their attributes.
* sysdeps/pthread/bits/rwlock.h: Redefined the rwlock type.
---
diff --git a/Makefile b/Makefile
index bfdae7b..dd6bf94 100644
--- a/Makefile
+++ b/Makefile
@@ -94,15 +94,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \
 	pt-mutex-transfer-np \
 	pt-mutex-getprioceiling pt-mutex-setprioceiling \
 	\
-	pt-rwlock-attr \
-	pt-rwlockattr-init pt-rwlockattr-destroy \
-	pt-rwlockattr-getpshared pt-rwlockattr-setpshared \
-	\
-	pt-rwlock-init pt-rwlock-destroy \
-	pt-rwlock-rdlock pt-rwlock-tryrdlock \
-	pt-rwlock-trywrlock pt-rwlock-wrlock \
-	pt-rwlock-timedrdlock pt-rwlock-timedwrlock \
-	pt-rwlock-unlock \
+	pt-rwlock \
 	\
 	pt-cond \
 	pt-condattr-init pt-condattr-destroy \
@@ -152,6 +144,9 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \
 	cthreads-compat \
 	$(SYSDEPS)
 
+# pt-rwlock needs atomic 64-bit memory accesses; on x86, SSE2 provides them.
+CFLAGS-pt-rwlock.c = -msse2
+
 ifeq ($(IN_GLIBC),no)
 SRCS := $(addsuffix .c,$(libpthread-routines))
 OBJS = $(addsuffix .o,$(basename $(notdir $(SRCS))))
diff --git a/sysdeps/generic/pt-rwlock.c b/sysdeps/generic/pt-rwlock.c
new file mode 100644
index 0000000..4aac7d4
--- /dev/null
+++ b/sysdeps/generic/pt-rwlock.c
@@ -0,0 +1,399 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille, 2016.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#include <pthread.h>
+#include <errno.h>
+#include <atomic.h>
+#include <pt-internal.h>
+
+static const pthread_rwlockattr_t dfl_attr =
+{
+  .__pshared = PTHREAD_PROCESS_PRIVATE
+};
+
+int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attrp, int pshared)
+{
+  if (pshared != PTHREAD_PROCESS_PRIVATE &&
+      pshared != PTHREAD_PROCESS_SHARED)
+    return (EINVAL);
+
+  attrp->__pshared = pshared;
+  return (0);
+}
+
+int pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *ap, int *outp)
+{
+  *outp = ap->__pshared;
+  return (0);
+}
+
+int pthread_rwlockattr_destroy (pthread_rwlockattr_t *attrp)
+{
+  (void)attrp;
+  return (0);
+}
+
+/* Read-write locks have the following memory layout:
+ *
+ *  0         4            8            12           16
+ *  |__shpid__|__qwriters__|__owner_id__|__nreaders__|
+ *
+ * Whenever a thread wants to acquire either lock, it first has to check
+ * the OID field. It may be unowned, owned by readers, or owned by a
+ * particular writer. For reader ownership, we use a special OID that no
+ * thread can ever have.
+ *
+ * When it comes to waiting for the lock to change ownership, we need
+ * different wait queues for readers and writers. However, both of them
+ * have to monitor the OID field for changes. This is where 64-bit gsync
+ * comes into play: readers will wait on the address of the OID, while
+ * writers will wait on the 64-bit word that starts at QWRITERS and
+ * extends to OID as well.
+ *
+ * This approach can cause some extra work on the writer side, but it's
+ * more efficient by virtue of being lockless. As long as we have 64-bit
+ * atomics, we can safely implement the POSIX read-write lock interface
+ * without using any internal locks. */
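+
+/* To make the layout concrete (an illustrative sketch only, assuming a
+ * little-endian 32-bit target; field names as in bits/rwlock.h): the
+ * four cells above are the halves of the lock's two 64-bit words,
+ *
+ *   rwp->__shpid_qwr.qv == { .lo = SHPID,    .hi = QWRITERS }
+ *   rwp->__oid_nrd.qv   == { .lo = OWNER_ID, .hi = NREADERS }
+ *
+ * so a single 64-bit CAS on &rwp->__oid_nrd.qv can change ownership and
+ * the reader count together, while the writers' 64-bit wait window at
+ * &QWRITERS straddles the two words, covering QWRITERS and OWNER_ID. */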
+
+#define __rwl_atp(ptr, idx)   (((unsigned int *)(ptr))[idx])
+
+/* Access the fields described above. */
+#define rwl_qwr(val)   __rwl_atp (&(val), -1)
+#define rwl_oid(val)   __rwl_atp (&(val), +0)
+#define rwl_nrd(val)   __rwl_atp (&(val), +1)
+
+/* Special IDs to represent unowned and readers-owned locks. */
+#define RWLOCK_UNOWNED   (0)
+#define RWLOCK_RO        (1U << 31)
+
+#define ID_MASK   (~RWLOCK_RO)
+
+/* Access the owner's PID for task-shared rwlocks. */
+#define rwl_spid(rwl)   *(unsigned int *)&(rwl)->__shpid_qwr
+
+int _pthread_rwlock_init (pthread_rwlock_t *rwp,
+  const pthread_rwlockattr_t *attrp)
+{
+  if (!attrp)
+    attrp = &dfl_attr;
+
+  rwp->__shpid_qwr.qv = rwp->__oid_nrd.qv = 0;
+  rwp->__flags = attrp->__pshared == PTHREAD_PROCESS_SHARED ?
+    GSYNC_SHARED : 0;
+
+  return (0);
+}
+
+strong_alias (_pthread_rwlock_init, pthread_rwlock_init)
+
+/* We need a real function here, because the expected and desired
+ * values are built with a macro that expands into a list of
+ * arguments. */
+static inline int
+catomic_casx_bool (unsigned long long *ptr, unsigned int elo,
+  unsigned int ehi, unsigned int nlo, unsigned int nhi)
+{
+  return (atomic_casx_bool (ptr, elo, ehi, nlo, nhi));
+}
+
+extern int __getpid (void) __attribute__ ((const));
+
+/* Test whether a read-write lock is owned by a particular thread. */
+#define rwl_owned_p(rwp, tid, flags)   \
+  (rwl_oid ((rwp)->__oid_nrd) == (tid) &&   \
+    (((flags) & GSYNC_SHARED) == 0 ||   \
+      rwl_spid (rwp) == (unsigned int)__getpid ()))
+
+#define rwl_setown(rwp, flags)   \
+  do   \
+    {   \
+      if ((flags) & GSYNC_SHARED)   \
+        rwl_spid(rwp) = __getpid ();   \
+    }   \
+  while (0)
+
+int __pthread_rwlock_rdlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  /* Check that we don't already own the write lock. */
+  if (rwl_owned_p (rwp, _pthread_self()->thread, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+      if ((rwl_oid (tmp) & ID_MASK) == 0)
+        {
+          /* The lock is either unowned, or the readers hold it. */
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+            {
+              /* If we grabbed an unowned lock and there were readers
+               * queued, notify our fellows so they stop blocking. */
+              if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              return (0);
+            }
+        }
+      else
+        {
+          /* A writer holds the lock. Sleep. */
+          atomic_increment (&rwl_nrd(rwp->__oid_nrd));
+          lll_wait (&rwl_oid(rwp->__oid_nrd), rwl_oid (tmp), flags);
+          atomic_decrement (&rwl_nrd(rwp->__oid_nrd));
+        }
+    }
+}
+
+strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
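+
+/* A sketch of the rdlock protocol above, written as { OID, NREADERS }
+ * transitions (illustrative only):
+ *
+ *   { UNOWNED,    n } --CAS--> { RWLOCK_RO, n + 1 }  acquired; wake the
+ *                                                    n queued readers
+ *   { RWLOCK_RO,  n } --CAS--> { RWLOCK_RO, n + 1 }  acquired
+ *   { writer TID, n }          block on &OID, with NREADERS counting
+ *                              us among the queued readers
+ *
+ * Because the CAS covers both fields, a reader can neither be lost nor
+ * double-counted between an ownership change and the counter update. */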
+
+int pthread_rwlock_tryrdlock (pthread_rwlock_t *rwp)
+{
+  if (rwl_owned_p (rwp, _pthread_self()->thread, rwp->__flags))
+    return (EDEADLK);
+
+  union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+  if ((rwl_oid (tmp) & ID_MASK) == 0 &&
+      catomic_casx_bool (&rwp->__oid_nrd.qv,
+        hurd_xint_pair (tmp.lo, tmp.hi),
+        hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+    {
+      if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+        lll_wake (&rwl_oid(rwp->__oid_nrd), GSYNC_BROADCAST |
+          (rwp->__flags & GSYNC_SHARED));
+
+      return (0);
+    }
+
+  return (EBUSY);
+}
+
+int pthread_rwlock_timedrdlock (pthread_rwlock_t *rwp,
+  const struct timespec *abstime)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  if (rwl_owned_p (rwp, _pthread_self()->thread, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+      if ((rwl_oid (tmp) & ID_MASK) == 0)
+        {
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+            {
+              if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              return (0);
+            }
+        }
+      else
+        {
+          /* The timeout parameter has to be validated on every
+           * iteration, but only here: it need not be examined when
+           * the lock can be taken without blocking. */
+          if (__glibc_unlikely (abstime->tv_nsec < 0 ||
+              abstime->tv_nsec >= 1000000000))
+            return (EINVAL);
+
+          atomic_increment (&rwl_nrd(rwp->__oid_nrd));
+          int ret = lll_abstimed_wait (&rwl_oid(rwp->__oid_nrd),
+            rwl_oid (tmp), abstime, flags);
+          atomic_decrement (&rwl_nrd(rwp->__oid_nrd));
+
+          if (ret == KERN_TIMEDOUT)
+            return (ETIMEDOUT);
+        }
+    }
+}
+
+int __pthread_rwlock_wrlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+  unsigned int self_id = _pthread_self()->thread;
+
+  if (rwl_owned_p (rwp, self_id, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+      atomic_read_barrier ();
+      unsigned int owner = *ptr;
+
+      if (owner == RWLOCK_UNOWNED)
+        {
+          if (atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+            {
+              rwl_setown (rwp, flags);
+              return (0);
+            }
+        }
+      else
+        {
+          /* Wait on the address. We are only interested in the value of
+           * the OID field, but we need a different queue for writers.
+           * As such, we use 64-bit values, with the high word being
+           * the owner id. */
+          unsigned int nw = atomic_exchange_and_add (--ptr, 1);
+          lll_xwait (ptr, nw + 1, owner, flags);
+          atomic_decrement (ptr);
+        }
+    }
+}
+
+strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
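+
+/* The writer slow path above, step by step (illustrative only): PTR
+ * starts at &OID, so --PTR lands on &QWRITERS; the exchange-and-add
+ * advertises one more queued writer, and lll_xwait blocks on the
+ * 64-bit pair only while it still reads { nw + 1, owner }. Waiting on
+ * &QWRITERS rather than &OID is what gives writers their own wake-up
+ * queue, even though both reader and writer waiters watch OID. */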
+
+int pthread_rwlock_trywrlock (pthread_rwlock_t *rwp)
+{
+  unsigned int self_id = _pthread_self()->thread;
+  unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+  atomic_read_barrier ();
+  unsigned int owner = *ptr;
+
+  if (rwl_owned_p (rwp, self_id, rwp->__flags))
+    return (EDEADLK);
+  else if (owner == RWLOCK_UNOWNED &&
+      atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+    {
+      rwl_setown (rwp, rwp->__flags);
+      return (0);
+    }
+
+  return (EBUSY);
+}
+
+int pthread_rwlock_timedwrlock (pthread_rwlock_t *rwp,
+  const struct timespec *abstime)
+{
+  unsigned int self_id = _pthread_self()->thread;
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  if (rwl_owned_p (rwp, self_id, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+      atomic_read_barrier ();
+      unsigned int owner = *ptr;
+
+      if (owner == RWLOCK_UNOWNED)
+        {
+          if (atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+            {
+              rwl_setown (rwp, flags);
+              return (0);
+            }
+        }
+      else
+        {
+          if (__glibc_unlikely (abstime->tv_nsec < 0 ||
+              abstime->tv_nsec >= 1000000000))
+            return (EINVAL);
+
+          unsigned int nw = atomic_exchange_and_add (--ptr, 1);
+          int ret = lll_abstimed_xwait (ptr, nw + 1, owner, abstime, flags);
+          nw = atomic_exchange_and_add (ptr, -1);
+
+          if (ret == KERN_TIMEDOUT)
+            {
+              /* If we timed out, there are no other writers pending,
+               * the lock is unowned *and* there are readers blocked,
+               * then it's possible that a wakeup was meant for us, but
+               * we timed out first. In such an unlikely case, we wake
+               * every reader in order to avoid a potential deadlock. */
+              union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+              if (__glibc_unlikely (nw == 1 && rwl_nrd (tmp) > 0 &&
+                  rwl_oid (tmp) == RWLOCK_UNOWNED))
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              /* We still return with an error. */
+              return (ETIMEDOUT);
+            }
+        }
+    }
+}
+
+int __pthread_rwlock_unlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+  atomic_read_barrier ();
+  unsigned int owner = rwl_oid (rwp->__oid_nrd);
+
+  if ((owner & ID_MASK) != 0)
+    {
+      /* A writer holds the lock. */
+      if (!rwl_owned_p (rwp, _pthread_self()->thread, flags))
+        /* ... but it isn't us. */
+        return (EPERM);
+
+      rwl_spid(rwp) = 0;
+      rwl_oid(rwp->__oid_nrd) = RWLOCK_UNOWNED;
+      atomic_write_barrier ();
+
+      /* The exclusive lock has been released. Now decide whether
+       * to wake a queued writer (preferred), or all the queued readers. */
+      if (rwl_qwr (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_qwr(rwp->__oid_nrd), flags);
+      else if (rwl_nrd (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+    }
+  else if (rwl_nrd (rwp->__oid_nrd) == 0)
+    return (EPERM);
+  else
+    {
+      union hurd_xint tmp;
+      while (1)
+        {
+          tmp.qv = atomic_loadx (&rwp->__oid_nrd.qv);
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (rwl_nrd (tmp) == 1 ?
+                RWLOCK_UNOWNED : RWLOCK_RO, rwl_nrd (tmp) - 1)))
+            break;
+        }
+
+      /* As a reader, we only need to do a wakeup if both:
+       * - We were the last reader.
+       * - There's at least one writer queued. */
+      if (rwl_nrd (tmp) == 1 && rwl_qwr (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_qwr(rwp->__oid_nrd), flags);
+    }
+
+  return (0);
+}
+
+strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
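+
+/* Wake policy of the release paths above (illustrative summary): a
+ * writer's release prefers a single queued writer (wake one on
+ * &QWRITERS) and falls back to broadcasting on &OID for the readers;
+ * the last reader out only ever needs to wake one writer, since
+ * readers never block while the lock is readers-owned. */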
+
+int _pthread_rwlock_destroy (pthread_rwlock_t *rwp)
+{
+  /* XXX: Maybe we could do some sanity checks. */
+  (void)rwp;
+  return (0);
+}
+
+strong_alias (_pthread_rwlock_destroy, pthread_rwlock_destroy)
+
diff --git a/sysdeps/pthread/bits/rwlock.h b/sysdeps/pthread/bits/rwlock.h
index bc27726..8226da7 100644
--- a/sysdeps/pthread/bits/rwlock.h
+++ b/sysdeps/pthread/bits/rwlock.h
@@ -1,5 +1,5 @@
 /* rwlock type.  Generic version.
-   Copyright (C) 2002, 2005, 2006, 2007, 2009 Free Software Foundation, Inc.
+   Copyright (C) 2002-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,27 +20,16 @@
 #ifndef _BITS_RWLOCK_H
 #define _BITS_RWLOCK_H
 
-#include <bits/spin-lock.h>
+#include <hurd/xint.h>
 
-/* User visible part of a rwlock.  If __held is not held and readers
-   is 0, then the lock is unlocked.  If __held is held and readers is
-   0, then the lock is held by a writer.  If __held is held and
-   readers is greater than 0, then the lock is held by READERS
-   readers.  */
 struct __pthread_rwlock
-  {
-    __pthread_spinlock_t __held;
-    __pthread_spinlock_t __lock;
-    int __readers;
-    struct __pthread *__readerqueue;
-    struct __pthread *__writerqueue;
-    struct __pthread_rwlockattr *__attr;
-    void *__data;
-  };
-
-/* Initializer for a rwlock.  */
-#define __PTHREAD_RWLOCK_INITIALIZER \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, 0, 0, 0 }
-
+{
+  union hurd_xint __shpid_qwr;
+  union hurd_xint __oid_nrd;
+  int __flags;
+};
+
+/* Static initializer for read-write locks. */
+#define __PTHREAD_RWLOCK_INITIALIZER { { 0 }, { 0 }, 0 }
 #endif /* bits/rwlock.h */
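
For reference, a minimal exerciser for the interface (a sketch using only
standard POSIX calls; nothing below is specific to this implementation):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int value;

static void *reader (void *arg)
{
  /* Any number of readers may hold the lock concurrently. */
  pthread_rwlock_rdlock (&lock);
  int snapshot = value;
  pthread_rwlock_unlock (&lock);
  printf ("read %d\n", snapshot);
  return arg;
}

int main (void)
{
  pthread_t thr[4];
  int i;

  /* A writer excludes both readers and other writers. */
  pthread_rwlock_wrlock (&lock);
  value = 42;
  pthread_rwlock_unlock (&lock);

  for (i = 0; i < 4; i++)
    pthread_create (&thr[i], NULL, reader, NULL);
  for (i = 0; i < 4; i++)
    pthread_join (thr[i], NULL);

  return 0;
}

(Build with: cc test-rwlock.c -pthread.)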