
Re: new module for async-safe spin locks


From: Bruno Haible
Subject: Re: new module for async-safe spin locks
Date: Fri, 03 Jul 2020 12:29:27 +0200
User-agent: KMail/5.1.3 (Linux/4.4.0-179-generic; KDE/5.18.0; x86_64; ; )

A small code unification: the sigprocmask calls were duplicated in every backend's lock and unlock functions; this moves the signal-mask handling into a single pair of shared wrappers. (A sketch of the resulting shape follows the patch.)


2020-07-03  Bruno Haible  <bruno@clisp.org>

        asyncsafe-spin: Reduce code duplication.
        * lib/asyncsafe-spin.c (do_lock, do_unlock): New functions.
        (asyncsafe_spin_lock, asyncsafe_spin_unlock): Use them.
        * modules/asyncsafe-spin (configure.ac): Require AC_C_INLINE.

diff --git a/lib/asyncsafe-spin.c b/lib/asyncsafe-spin.c
index 2198bcb..81e48ad 100644
--- a/lib/asyncsafe-spin.c
+++ b/lib/asyncsafe-spin.c
@@ -36,20 +36,17 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   glwthread_spin_init (lock);
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask);
   glwthread_spin_lock (lock);
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   if (glwthread_spin_unlock (lock))
     abort ();
-  sigprocmask (SIG_SETMASK, saved_mask, NULL);
 }
 
 void
@@ -58,7 +55,9 @@ asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
   glwthread_spin_destroy (lock);
 }
 
-#elif HAVE_PTHREAD_H
+#else
+
+# if HAVE_PTHREAD_H
 /* Use POSIX threads.  */
 
 /* We don't use semaphores (although sem_post() is allowed in signal handlers),
@@ -68,14 +67,14 @@ asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
    We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because it would
    require to link with -latomic.  */
 
-# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) && !defined __ibmxl__
+#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) && !defined __ibmxl__
 /* Use GCC built-ins (available in GCC >= 4.7) that operate on the first byte of
    the lock.
    Documentation:
    <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>
  */
 
-#  if 1
+#   if 1
 /* An implementation that verifies the unlocks.  */
 
 void
@@ -84,12 +83,9 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   /* Wait until *lock becomes 0, then replace it with 1.  */
   asyncsafe_spinlock_t zero;
   while (!(zero = 0,
@@ -98,19 +94,17 @@ asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
     ;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   /* If *lock is 1, then replace it with 0.  */
   asyncsafe_spinlock_t one = 1;
   if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
     abort ();
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
 }
 
-#  else
+#   else
 /* An implementation that is a little bit more optimized, but does not verify
    the unlocks.  */
 
@@ -120,32 +114,22 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   __atomic_clear (lock, __ATOMIC_SEQ_CST);
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
     ;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   __atomic_clear (lock, __ATOMIC_SEQ_CST);
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
 }
 
-#  endif
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
-}
+#   endif
 
-# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined __ibmxl__
+#  elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined __ibmxl__
 /* Use GCC built-ins (available in GCC >= 4.1).
    Documentation:
    <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */
@@ -158,33 +142,23 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   __sync_synchronize ();
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   /* Wait until *lock becomes 0, then replace it with 1.  */
   while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
     ;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   /* If *lock is 1, then replace it with 0.  */
   if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
     abort ();
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
 }
 
-# elif defined _AIX
+#  elif defined _AIX
 /* AIX */
 
 void
@@ -194,33 +168,23 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   _clear_lock (vp, 0);
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   atomic_p vp = (int *) lock;
   while (_check_lock (vp, 0, 1))
     ;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   atomic_p vp = (int *) lock;
   if (_check_lock (vp, 1, 0))
     abort ();
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
 }
 
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
-}
-
-# elif (defined __GNUC__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
+#  elif (defined __GNUC__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
 /* For older versions of GCC, use inline assembly.
    GCC and the Oracle Studio C compiler understand GCC's extended asm syntax,
    but the plain Solaris cc understands only simple asm.  */
@@ -229,21 +193,21 @@ asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
 static void
 memory_barrier (void)
 {
-#  if defined __GNUC__
-#   if defined __i386 || defined __x86_64__
+#   if defined __GNUC__
+#    if defined __i386 || defined __x86_64__
   asm volatile ("mfence");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
   asm volatile ("membar 2");
-#   endif
-#  else
-#   if defined __i386 || defined __x86_64__
+#    endif
+#   else
+#    if defined __i386 || defined __x86_64__
   asm ("mfence");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
   asm ("membar 2");
+#    endif
 #   endif
-#  endif
 }
 
 /* Store NEWVAL in *VP if the old value *VP is == CMP.
@@ -252,33 +216,33 @@ static unsigned int
 atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                          unsigned int newval)
 {
-#  if defined __GNUC__
+#   if defined __GNUC__
   unsigned int oldval;
-#   if defined __i386 || defined __x86_64__
+#    if defined __i386 || defined __x86_64__
   asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
   asm volatile (" cas [%1],%2,%3\n"
                 " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
-#   endif
+#    endif
   return oldval;
-#  else /* __SUNPRO_C */
-#   if defined __x86_64__
+#   else /* __SUNPRO_C */
+#    if defined __x86_64__
   asm (" movl %esi,%eax\n"
        " lock\n cmpxchgl %edx,(%rdi)");
-#   elif defined __i386
+#    elif defined __i386
   asm (" movl 16(%ebp),%ecx\n"
        " movl 12(%ebp),%eax\n"
        " movl 8(%ebp),%edx\n"
        " lock\n cmpxchgl %ecx,(%edx)");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
   asm (" cas [%i0],%i1,%i2\n"
        " mov %i2,%i0");
+#    endif
 #   endif
-#  endif
 }
 
 void
@@ -289,33 +253,23 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   memory_barrier ();
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   volatile unsigned int *vp = lock;
   while (atomic_compare_and_swap (vp, 0, 1) != 0)
     ;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   volatile unsigned int *vp = lock;
   if (atomic_compare_and_swap (vp, 1, 0) != 1)
     abort ();
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
 }
 
-# else
+#  else
 /* Fallback code.  It has some race conditions.  */
 
 void
@@ -325,35 +279,25 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
   *vp = 0;
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
   volatile unsigned int *vp = lock;
   while (*vp)
     ;
   *vp = 1;
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
   volatile unsigned int *vp = lock;
   *vp = 0;
-
-  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
 }
 
-# endif
+#  endif
 
-#else
+# else
 /* Provide a dummy implementation for single-threaded applications.  */
 
 void
@@ -361,22 +305,36 @@ asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
 {
 }
 
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
-                     const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_BLOCK, mask, saved_mask);
 }
 
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
 {
-  sigprocmask (SIG_SETMASK, saved_mask, NULL);
 }
 
+# endif
+
 void
 asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
 {
 }
 
 #endif
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+  do_lock (lock);
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  do_unlock (lock);
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
diff --git a/modules/asyncsafe-spin b/modules/asyncsafe-spin
index 40d181b..fa27b08 100644
--- a/modules/asyncsafe-spin
+++ b/modules/asyncsafe-spin
@@ -12,6 +12,7 @@ sigprocmask
 windows-spin
 
 configure.ac:
+AC_REQUIRE([AC_C_INLINE])
 AC_CHECK_HEADERS_ONCE([pthread.h])
 
 Makefile.am:
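
For readers skimming the patch: after this change, each backend contributes only the static inline do_lock/do_unlock primitives, and the signal-mask handling appears exactly once, in the shared asyncsafe_spin_lock/asyncsafe_spin_unlock wrappers at the end of the file. Here is a minimal self-contained sketch of that shape (the standalone names spinlock_t, spin_lock and spin_unlock are illustrative, not gnulib's; the backend shown is the GCC >= 4.7 __atomic branch from the patch):

#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>

typedef unsigned int spinlock_t;  /* illustrative stand-in for asyncsafe_spinlock_t */

/* Backend primitive: wait until *lock becomes 0, then replace it with 1.  */
static inline void
do_lock (spinlock_t *lock)
{
  spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

/* Backend primitive: release the lock, aborting if it was not held.  */
static inline void
do_unlock (spinlock_t *lock)
{
  spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}

/* Shared wrappers: the sigprocmask calls, previously repeated in every
   backend, now live in one place.  */
void
spin_lock (spinlock_t *lock, const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
spin_unlock (spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}

Blocking the given signals before spinning is what keeps the lock async-safe: a signal handler in the same thread can never run while the lock is held, so it cannot deadlock trying to take a lock its own thread already owns. The AC_REQUIRE([AC_C_INLINE]) in the module description makes 'inline' usable even with pre-C99 compilers, where Autoconf defines it to a compiler-specific keyword or to nothing.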