From 79db2daddd96952239a77b74ebea9d40ccd5878b Mon Sep 17 00:00:00 2001
From: Bruno Haible <bruno@clisp.org>
Date: Wed, 1 Jul 2020 22:59:08 +0200
Subject: [PATCH 1/2] asyncsafe-spin: New module.

* lib/asyncsafe-spin.h: New file.
* lib/asyncsafe-spin.c: New file, based on lib/pthread-spin.c.
* modules/asyncsafe-spin: New file.
---
 ChangeLog              |   7 +
 lib/asyncsafe-spin.c   | 382 +++++++++++++++++++++++++++++++++++++++++++++++++
 lib/asyncsafe-spin.h   |  69 +++++++++
 modules/asyncsafe-spin |  27 ++++
 4 files changed, 485 insertions(+)
 create mode 100644 lib/asyncsafe-spin.c
 create mode 100644 lib/asyncsafe-spin.h
 create mode 100644 modules/asyncsafe-spin

diff --git a/ChangeLog b/ChangeLog
index 0051d82..179725a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,12 @@
 2020-07-01  Bruno Haible  <bruno@clisp.org>
 
+	asyncsafe-spin: New module.
+	* lib/asyncsafe-spin.h: New file.
+	* lib/asyncsafe-spin.c: New file, based on lib/pthread-spin.c.
+	* modules/asyncsafe-spin: New file.
+
+2020-07-01  Bruno Haible  <bruno@clisp.org>
+
 	windows-spin: Fix race condition on multiprocessor systems.
 	* lib/windows-spin.c (glwthread_spin_init): Add a memory barrier.
 
diff --git a/lib/asyncsafe-spin.c b/lib/asyncsafe-spin.c
new file mode 100644
index 0000000..2198bcb
--- /dev/null
+++ b/lib/asyncsafe-spin.c
@@ -0,0 +1,382 @@
+/* Spin locks for communication between threads and signal handlers.
+   Copyright (C) 2020 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, see <https://www.gnu.org/licenses/>.  */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
+
+#include <config.h>
+
+/* Specification.  */
+#include "asyncsafe-spin.h"
+
+#include <stdbool.h>
+#include <stdlib.h>
+#if defined _AIX
+# include <sys/atomic_op.h>
+#endif
+
+#if defined _WIN32 && ! defined __CYGWIN__
+/* Use Windows threads.  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  glwthread_spin_init (lock);
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask);
+  glwthread_spin_lock (lock);
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  if (glwthread_spin_unlock (lock))
+    abort ();
+  sigprocmask (SIG_SETMASK, saved_mask, NULL);
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+  glwthread_spin_destroy (lock);
+}
+
+#elif HAVE_PTHREAD_H
+/* Use POSIX threads.  */
+
+/* We don't use semaphores (although sem_post() is allowed in signal handlers),
+   because they would require linking with -lrt on HP-UX 11, OSF/1, Solaris 10,
+   and because on macOS only named semaphores work.
+
+   We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because it
+   would require linking with -latomic.  */
+
+# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) && !defined __ibmxl__
+/* Use GCC built-ins (available in GCC >= 4.7) that operate on the first byte
+   of the lock.
+   Documentation:
+   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */
+
+#  if 1
+/* An implementation that verifies the unlocks.  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  /* Wait until *lock becomes 0, then replace it with 1.  */
+  asyncsafe_spinlock_t zero;
+  while (!(zero = 0,
+           __atomic_compare_exchange_n (lock, &zero, 1, false,
+                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+    ;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  /* If *lock is 1, then replace it with 0.  */
+  asyncsafe_spinlock_t one = 1;
+  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
+                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+#  else
+/* An implementation that is a little bit more optimized, but does not verify
+   the unlocks.  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  __atomic_clear (lock, __ATOMIC_SEQ_CST);
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
+    ;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  __atomic_clear (lock, __ATOMIC_SEQ_CST);
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+#  endif
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined __ibmxl__
+/* Use GCC built-ins (available in GCC >= 4.1).
+   Documentation:
+   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  volatile unsigned int *vp = lock;
+  *vp = 0;
+  __sync_synchronize ();
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  /* Wait until *lock becomes 0, then replace it with 1.  */
+  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
+    ;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  /* If *lock is 1, then replace it with 0.  */
+  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
+    abort ();
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+# elif defined _AIX
+/* AIX */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  atomic_p vp = (int *) lock;
+  _clear_lock (vp, 0);
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  atomic_p vp = (int *) lock;
+  while (_check_lock (vp, 0, 1))
+    ;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  atomic_p vp = (int *) lock;
+  if (_check_lock (vp, 1, 0))
+    abort ();
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+# elif (defined __GNUC__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
+/* For older versions of GCC, use inline assembly.
+   GCC and the Oracle Studio C compiler understand GCC's extended asm syntax,
+   but the plain Solaris cc understands only simple asm.  */
+/* An implementation that verifies the unlocks.  */
+
+static void
+memory_barrier (void)
+{
+#  if defined __GNUC__
+#   if defined __i386 || defined __x86_64__
+  asm volatile ("mfence");
+#   endif
+#   if defined __sparc
+  asm volatile ("membar 2");
+#   endif
+#  else
+#   if defined __i386 || defined __x86_64__
+  asm ("mfence");
+#   endif
+#   if defined __sparc
+  asm ("membar 2");
+#   endif
+#  endif
+}
+
+/* Store NEWVAL in *VP if the old value *VP is == CMP.
+   Return the old value.  */
+static unsigned int
+atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
+                         unsigned int newval)
+{
+#  if defined __GNUC__
+  unsigned int oldval;
+#   if defined __i386 || defined __x86_64__
+  asm volatile (" lock\n cmpxchgl %3,(%1)"
+                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
+#   endif
+#   if defined __sparc
+  asm volatile (" cas [%1],%2,%3\n"
+                " mov %3,%0"
+                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
+#   endif
+  return oldval;
+#  else /* __SUNPRO_C */
+#   if defined __x86_64__
+  asm (" movl %esi,%eax\n"
+       " lock\n cmpxchgl %edx,(%rdi)");
+#   elif defined __i386
+  asm (" movl 16(%ebp),%ecx\n"
+       " movl 12(%ebp),%eax\n"
+       " movl 8(%ebp),%edx\n"
+       " lock\n cmpxchgl %ecx,(%edx)");
+#   endif
+#   if defined __sparc
+  asm (" cas [%i0],%i1,%i2\n"
+       " mov %i2,%i0");
+#   endif
+#  endif
+}
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  volatile unsigned int *vp = lock;
+  *vp = 0;
+  memory_barrier ();
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  volatile unsigned int *vp = lock;
+  while (atomic_compare_and_swap (vp, 0, 1) != 0)
+    ;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  volatile unsigned int *vp = lock;
+  if (atomic_compare_and_swap (vp, 1, 0) != 1)
+    abort ();
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+# else
+/* Fallback code.  It has some race conditions.  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+  volatile unsigned int *vp = lock;
+  *vp = 0;
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+
+  volatile unsigned int *vp = lock;
+  while (*vp)
+    ;
+  *vp = 1;
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  volatile unsigned int *vp = lock;
+  *vp = 0;
+
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+# endif
+
+#else
+/* Provide a dummy implementation for single-threaded applications.  */
+
+void
+asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
+{
+}
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask);
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  sigprocmask (SIG_SETMASK, saved_mask, NULL);
+}
+
+void
+asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
+{
+}
+
+#endif
diff --git a/lib/asyncsafe-spin.h b/lib/asyncsafe-spin.h
new file mode 100644
index 0000000..7e3693f
--- /dev/null
+++ b/lib/asyncsafe-spin.h
@@ -0,0 +1,69 @@
+/* Spin locks for communication between threads and signal handlers.
+   Copyright (C) 2020 Free Software Foundation, Inc.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, see <https://www.gnu.org/licenses/>.  */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
+
+#ifndef _ASYNCSAFE_SPIN_H
+#define _ASYNCSAFE_SPIN_H
+
+/* Ordinary spin locks are not allowed for communication between threads and
+   signal handlers, because the pthread_spin_* functions are not async-safe;
+   see
+   <https://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html>
+   section 2.4.3 Signal Actions.
+
+   This module provides spin locks with a similar API.  It can be used like
+   this, both in regular multithreaded code and in signal handlers:
+
+       sigset_t saved_mask;
+       asyncsafe_spin_lock (&lock, &mask, &saved_mask);
+       do_something_contentious ();
+       asyncsafe_spin_unlock (&lock, &saved_mask);
+
+   The mask you specify here is the set of signals whose handlers might want
+   to take the same lock.
+
+   asyncsafe_spin_lock/unlock use pthread_sigmask to ensure that while a
+   thread is executing such code, no signal handler will start such code for
+   the same lock *in the same thread* (because if this happened, the signal
+   handler would hang!).  */
+
+#include <signal.h>
+
+#if defined _WIN32 && ! defined __CYGWIN__
+# include "windows-spin.h"
+typedef glwthread_spinlock_t asyncsafe_spinlock_t;
+# define ASYNCSAFE_SPIN_INIT GLWTHREAD_SPIN_INIT
+#else
+typedef unsigned int asyncsafe_spinlock_t;
+# define ASYNCSAFE_SPIN_INIT 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void asyncsafe_spin_init (asyncsafe_spinlock_t *lock);
+extern void asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                                 const sigset_t *mask, sigset_t *saved_mask);
+extern void asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock,
+                                   const sigset_t *saved_mask);
+extern void asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ASYNCSAFE_SPIN_H */
diff --git a/modules/asyncsafe-spin b/modules/asyncsafe-spin
new file mode 100644
index 0000000..40d181b
--- /dev/null
+++ b/modules/asyncsafe-spin
@@ -0,0 +1,27 @@
+Description:
+Spin locks for communication between threads and signal handlers.
+
+Files:
+lib/asyncsafe-spin.h
+lib/asyncsafe-spin.c
+
+Depends-on:
+signal-h
+stdbool
+sigprocmask
+windows-spin
+
+configure.ac:
+AC_CHECK_HEADERS_ONCE([pthread.h])
+
+Makefile.am:
+lib_SOURCES += asyncsafe-spin.c
+
+Include:
+"asyncsafe-spin.h"
+
+License:
+LGPLv2+
+
+Maintainer:
+all
-- 
2.7.4
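
For reference, here is a minimal usage sketch of the new module. It is not
part of the patch; the identifiers counter, lock_mask, and on_sigint are
illustrative only, while the asyncsafe_spin_* functions and
ASYNCSAFE_SPIN_INIT come from asyncsafe-spin.h above.

/* Sketch: a counter shared between the main thread and a SIGINT handler,
   protected by an async-safe spin lock.  */
#include <signal.h>
#include <stdio.h>
#include "asyncsafe-spin.h"

static asyncsafe_spinlock_t lock = ASYNCSAFE_SPIN_INIT;
static sigset_t lock_mask;             /* signals whose handlers take this lock */
static volatile unsigned long counter; /* state shared with the handler */

static void
on_sigint (int sig)
{
  sigset_t saved_mask;
  (void) sig;
  /* Unlike pthread_spin_lock, this call is safe inside a signal handler.  */
  asyncsafe_spin_lock (&lock, &lock_mask, &saved_mask);
  counter += 1000;
  asyncsafe_spin_unlock (&lock, &saved_mask);
}

int
main (void)
{
  struct sigaction sa;
  unsigned long i;

  sigemptyset (&lock_mask);
  sigaddset (&lock_mask, SIGINT); /* SIGINT's handler takes the same lock */

  sa.sa_handler = on_sigint;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction (SIGINT, &sa, NULL);

  for (i = 0; i < 1000000; i++)
    {
      sigset_t saved_mask;
      /* While the lock is held, SIGINT is blocked in this thread, so
         on_sigint cannot run here and spin on a lock we already hold.  */
      asyncsafe_spin_lock (&lock, &lock_mask, &saved_mask);
      counter++;
      asyncsafe_spin_unlock (&lock, &saved_mask);
    }

  asyncsafe_spin_destroy (&lock);
  printf ("counter = %lu\n", counter);
  return 0;
}

The design point this illustrates, spelled out in asyncsafe-spin.h: the mask
passed to asyncsafe_spin_lock must include every signal whose handler takes
the same lock, because blocking those signals for the duration of the
critical section is what prevents a handler from self-deadlocking against
its own thread.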