bug-gnulib
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

pthread-spin: add error checking


From: Bruno Haible
Subject: pthread-spin: add error checking
Date: Wed, 01 Jul 2020 22:20:36 +0200
User-agent: KMail/5.1.3 (Linux/4.4.0-179-generic; KDE/5.18.0; x86_64; ; )

This patch modifies the fallback implementation of the POSIX spin-locks
so that the unlock operation verifies that the lock is indeed locked.


2020-07-01  Bruno Haible  <bruno@clisp.org>

        pthread-spin: Add error checking.
        * lib/pthread-spin.c: Include <stdbool.h>.
        (pthread_spin_init, pthread_spin_lock, pthread_spin_trylock,
        pthread_spin_unlock) [GCC>=4.7]: Prefer an implementation that verifies
        the unlocks.
        * modules/pthread-spin (Depends-on): Add stdbool.

diff --git a/lib/pthread-spin.c b/lib/pthread-spin.c
index 93e8b37..73eb9f3 100644
--- a/lib/pthread-spin.c
+++ b/lib/pthread-spin.c
@@ -21,6 +21,8 @@
 /* Specification.  */
 #include <pthread.h>
 
+#include <stdbool.h>
+
 #if (defined _WIN32 && ! defined __CYGWIN__) && USE_WINDOWS_THREADS
 # include "windows-spin.h"
 #endif
@@ -72,6 +74,55 @@ pthread_spin_destroy (pthread_spinlock_t *lock)
    Documentation:
    <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */
 
+#  if 1
+/* An implementation that verifies the unlocks.  */
+
+int
+pthread_spin_init (pthread_spinlock_t *lock,
+                   int shared_across_processes _GL_UNUSED)
+{
+  __atomic_store_n ((char *) lock, 0, __ATOMIC_SEQ_CST);
+  return 0;
+}
+
+int
+pthread_spin_lock (pthread_spinlock_t *lock)
+{
+  /* Wait until *lock becomes 0, then replace it with 1.  */
+  asyncsafe_spinlock_t zero;
+  while (!(zero = 0,
+           __atomic_compare_exchange_n ((char *) lock, &zero, 1, false,
+                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+    ;
+  return 0;
+}
+
+int
+pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+  asyncsafe_spinlock_t zero;
+  if (!(zero = 0,
+        __atomic_compare_exchange_n ((char *) lock, &zero, 1, false,
+                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
+    return EBUSY;
+  return 0;
+}
+
+int
+pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+  /* If *lock is 1, then replace it with 0.  */
+  asyncsafe_spinlock_t one = 1;
+  if (!__atomic_compare_exchange_n ((char *) lock, &one, 0, false,
+                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+  return 0;
+}
+
+#  else
+/* An implementation that is a little bit more optimized, but does not verify
+   the unlocks.  */
+
 int
 pthread_spin_init (pthread_spinlock_t *lock,
                    int shared_across_processes _GL_UNUSED)
@@ -103,6 +154,8 @@ pthread_spin_unlock (pthread_spinlock_t *lock)
   return 0;
 }
 
+#  endif
+
 int
 pthread_spin_destroy (pthread_spinlock_t *lock)
 {
diff --git a/modules/pthread-spin b/modules/pthread-spin
index 53e0051..ce69944 100644
--- a/modules/pthread-spin
+++ b/modules/pthread-spin
@@ -7,6 +7,7 @@ m4/pthread-spin.m4
 
 Depends-on:
 pthread-h
+stdbool         [test $HAVE_PTHREAD_SPIN_INIT = 0 || test $REPLACE_PTHREAD_SPIN_INIT = 1]
 windows-spin    [test $gl_threads_api = windows]
 
 configure.ac:




reply via email to

[Prev in Thread] Current Thread [Next in Thread]