This is the mail archive of the libc-alpha@sourceware.cygnus.com mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]

LinuxThreads read write locks



I took Gabriel L. Somlo's test program that he posted to comp.programming.threads,
and linked it against my latest patched libpthread.a.  It runs, and produces
the expected results, the same output as Gabriel got on Solaris.

This makes me confident that I didn't introduce any obvious breakage. 

The latest patch also supports recursive read locks, something I have
yet to test.

But in the spirit of ``release early, release often'', here it is.
This is against 2.1.3-pre2 code:

[ diff from here to EOM ]

Index: linuxthreads/internals.h
diff -u linuxthreads/internals.h:1.5 linuxthreads/internals.h:1.5.2.1
--- linuxthreads/internals.h:1.5	Sat Jan  8 21:38:16 2000
+++ linuxthreads/internals.h	Tue Jan 11 12:48:05 2000
@@ -102,6 +102,22 @@
   int p_spinlock;
 };
 
+/* Context info for read write locks. The pthread_rwlock_info structure
+   is information about a lock that has been read-locked by the thread
+   in whose list this structure appears. The pthread_rwlock_context
+   is embedded in the thread context and contains a pointer to the
+   head of the list of lock info structures, as well as a count of
+   read locks that are untracked, because no info structure could be
+   allocated for them. */
+
+struct _pthread_rwlock_t;
+
+typedef struct _pthread_rwlock_info {
+  struct _pthread_rwlock_info *pr_next;
+  struct _pthread_rwlock_t *pr_lock;
+  int pr_lock_count;
+} pthread_readlock_info;
+
 struct _pthread_descr_struct {
   pthread_descr p_nextlive, p_prevlive;
                                 /* Double chaining of active threads */
@@ -144,6 +160,9 @@
 					   called on thread */
   char p_woken_by_cancel;       /* cancellation performed wakeup */
   pthread_extricate_if *p_extricate; /* See above */
+  pthread_readlock_info *p_readlock_list;  /* List of readlock info structs */
+  pthread_readlock_info *p_readlock_free;  /* Free list of structs */
+  int p_untracked_readlock_count;	/* Readlocks not tracked by list */
   /* New elements must be added at the end.  */
 } __attribute__ ((aligned(32))); /* We need to align the structure so that
 				    doubles are aligned properly.  This is 8
Index: linuxthreads/manager.c
diff -u linuxthreads/manager.c:1.1.1.2 linuxthreads/manager.c:1.1.1.2.2.1
--- linuxthreads/manager.c:1.1.1.2	Sun Jan  2 18:03:12 2000
+++ linuxthreads/manager.c	Tue Jan 11 12:48:05 2000
@@ -500,6 +500,8 @@
 static void pthread_free(pthread_descr th)
 {
   pthread_handle handle;
+  pthread_readlock_info *iter, *next;
+
   ASSERT(th->p_exited);
   /* Make the handle invalid */
   handle =  thread_handle(th->p_tid);
@@ -512,6 +514,23 @@
 #endif
   /* One fewer threads in __pthread_handles */
   __pthread_handles_num--;
+
+  /* Destroy read lock list, and list of free read lock structures. 
+     If the former is not empty, it means the thread exited while
+     holding read locks! */
+
+  for (iter = th->p_readlock_list; iter != NULL; iter = next)
+    {
+      next = iter->pr_next;
+      free(iter);
+    }
+
+  for (iter = th->p_readlock_free; iter != NULL; iter = next)
+    {
+      next = iter->pr_next;
+      free(iter);
+    }
+
   /* If initial thread, nothing to free */
   if (th == &__pthread_initial_thread) return;
   if (!th->p_userstack)
Index: linuxthreads/pthread.c
diff -u linuxthreads/pthread.c:1.7 linuxthreads/pthread.c:1.7.2.1
--- linuxthreads/pthread.c:1.7	Sat Jan  8 21:38:16 2000
+++ linuxthreads/pthread.c	Tue Jan 11 12:48:05 2000
@@ -70,7 +70,10 @@
   {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
   ATOMIC_INITIALIZER,         /* struct pthread_atomic p_resume_count */
   0,                          /* char p_woken_by_cancel */
-  NULL                        /* struct pthread_extricate_if *p_extricate */
+  NULL,                       /* struct pthread_extricate_if *p_extricate */
+  NULL,	                      /* pthread_readlock_info *p_readlock_list; */
+  NULL,                       /* pthread_readlock_info *p_readlock_free; */
+  0                           /* int p_untracked_readlock_count; */
 };
 
 /* Descriptor of the manager thread; none of this is used but the error
@@ -117,7 +120,10 @@
   {{{0, }}, 0, NULL},         /* td_eventbuf_t p_eventbuf */
   ATOMIC_INITIALIZER,         /* struct pthread_atomic p_resume_count */
   0,                          /* char p_woken_by_cancel */
-  NULL                        /* struct pthread_extricate_if *p_extricate */
+  NULL,                       /* struct pthread_extricate_if *p_extricate */
+  NULL,	                      /* pthread_readlock_info *p_readlock_list; */
+  NULL,                       /* pthread_readlock_info *p_readlock_free; */
+  0                           /* int p_untracked_readlock_count; */
 };
 
 /* Pointer to the main thread (the father of the thread manager thread) */
Index: linuxthreads/queue.h
diff -u linuxthreads/queue.h:1.2 linuxthreads/queue.h:1.2.2.1
--- linuxthreads/queue.h:1.2	Sun Jan  2 17:50:26 2000
+++ linuxthreads/queue.h	Sat Jan  8 22:06:13 2000
@@ -54,3 +54,8 @@
   }
   return 0;
 }
+
+static inline int queue_is_empty(pthread_descr * q)
+{
+    return *q == NULL;
+}
Index: linuxthreads/rwlock.c
diff -u linuxthreads/rwlock.c:1.2 linuxthreads/rwlock.c:1.2.2.2
--- linuxthreads/rwlock.c:1.2	Sun Jan  2 17:50:26 2000
+++ linuxthreads/rwlock.c	Tue Jan 11 12:48:06 2000
@@ -21,11 +21,167 @@
 
 #include <errno.h>
 #include <pthread.h>
+#include <stdlib.h>
 #include "internals.h"
 #include "queue.h"
 #include "spinlock.h"
 #include "restart.h"
 
+/*
+ * Check whether the calling thread already owns one or more read locks on the
+ * specified lock. If so, return a pointer to the read lock info structure
+ * corresponding to that lock.
+ */
+
+static pthread_readlock_info *
+rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info *info;
+
+  for (info = self->p_readlock_list; info != NULL; info = info->pr_next)
+    {
+      if (info->pr_lock == rwlock)
+	return info;
+    }
+
+  return NULL;
+}
+
+/*
+ * Add a new lock to the thread's list of locks for which it has a read lock.
+ * A new info node must be allocated for this, which is taken from the thread's
+ * free list, or by calling malloc. If malloc fails, a null pointer is
+ * returned. Otherwise the lock info structure is initialized and pushed
+ * onto the thread's list.
+ */
+
+static pthread_readlock_info *
+rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info *info = self->p_readlock_free;
+
+  if (info != NULL) 
+    self->p_readlock_free = info->pr_next;
+  else
+    info = malloc(sizeof *info);
+
+  if (info == NULL)
+    return NULL;
+
+  info->pr_lock_count = 1;
+  info->pr_lock = rwlock;
+  info->pr_next = self->p_readlock_list;
+  self->p_readlock_list = info;
+
+  return info;
+}
+
+/*
+ * If the thread owns a read lock over the given pthread_rwlock_t,
+ * and this read lock is tracked in the thread's lock list,
+ * this function returns a pointer to the info node in that list.
+ * It also decrements the lock count within that node, and if
+ * it reaches zero, it removes the node from the list.
+ * If nothing is found, it returns a null pointer.
+ */
+
+static pthread_readlock_info *
+rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
+{
+  pthread_readlock_info **pinfo;
+
+  for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
+    {
+      if ((*pinfo)->pr_lock == rwlock)
+	{
+	  pthread_readlock_info *info = *pinfo;
+	  if (--info->pr_lock_count == 0)
+	    *pinfo = info->pr_next;
+	  return info;
+	}
+    }
+  
+  return NULL;
+}
+
+/*
+ * This function checks whether the conditions are right to place a read lock.
+ * It returns 1 if so, otherwise zero. The rwlock's internal lock must be
+ * locked upon entry.
+ */
+
+static int
+rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
+{
+  /* Can't readlock; it is write locked. */
+  if (rwlock->__rw_writer != NULL)
+    return 0;
+
+  /* Lock prefers readers; get it. */
+  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
+    return 1;
+
+  /* Lock prefers writers, but none are waiting. */
+  if (queue_is_empty(&rwlock->__rw_write_waiting))
+    return 1;
+
+  /* Writers are waiting, but this thread already has a read lock */
+  if (have_lock_already)
+    return 1;
+
+  /* Writers are waiting, and this is a new lock */
+  return 0;
+}
+
+/*
+ * This function helps support brain-damaged recursive read locking
+ * semantics required by Unix 98, while maintaining write priority.
+ * This basically determines whether this thread already holds a read lock
+ * already. It returns 1 if so, otherwise it returns 0.  
+ *
+ * If the thread has any ``untracked read locks'' then it just assumes
+ * that this lock is among them, just to be safe, and returns 1.
+ *
+ * Also, if it finds the thread's lock in the list, it sets the pointer
+ * referenced by pexisting to refer to the list entry.
+ *
+ * If the thread has no untracked locks, and the lock is not found
+ * in its list, then it is added to the list. If this fails,
+ * then *pout_of_mem is set to 1.
+ */
+
+static int
+rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
+    pthread_readlock_info **pexisting, int *pout_of_mem)
+{
+  pthread_readlock_info *existing = NULL;
+  int out_of_mem = 0, have_lock_already = 0;
+  pthread_descr self = *pself;
+
+  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
+    {
+      if (!self)
+	self = thread_self();
+
+      existing = rwlock_is_in_list(self, rwlock);
+
+      if (existing != NULL || self->p_untracked_readlock_count > 0)
+	have_lock_already = 1;
+      else
+	{
+	  existing = rwlock_add_to_list(self, rwlock);
+	  if (existing == NULL)
+	    out_of_mem = 1;
+	}
+    }
+
+  *pout_of_mem = out_of_mem;
+  *pexisting = existing;
+  *pself = self;
+
+  return have_lock_already;
+}
+
 int
 pthread_rwlock_init (pthread_rwlock_t *rwlock,
 		     const pthread_rwlockattr_t *attr)
@@ -68,24 +224,26 @@
   return 0;
 }
 
-
 int
 pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 {
   pthread_descr self = NULL;
+  pthread_readlock_info *existing;
+  int out_of_mem, have_lock_already;
+
+  have_lock_already = rwlock_have_already(&self, rwlock,
+      &existing, &out_of_mem);
 
-  while (1)
+  for (;;)
     {
+      if (self == NULL)
+	self = thread_self ();
+
       __pthread_lock (&rwlock->__rw_lock, self);
-      if (rwlock->__rw_writer == NULL
-	  || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
-	      && rwlock->__rw_readers != 0))
-	/* We can add a reader lock.  */
+
+      if (rwlock_can_rdlock(rwlock, have_lock_already))
 	break;
 
-      /* Suspend ourselves, then try again */
-      if (self == NULL)
-	self = thread_self ();
       enqueue (&rwlock->__rw_read_waiting, self);
       __pthread_unlock (&rwlock->__rw_lock);
       suspend (self); /* This is not a cancellation point */
@@ -94,26 +252,50 @@
   ++rwlock->__rw_readers;
   __pthread_unlock (&rwlock->__rw_lock);
 
+  if (have_lock_already || out_of_mem)
+    {
+      if (existing != NULL)
+	existing->pr_lock_count++;
+      else
+	self->p_untracked_readlock_count++;
+    }
+  
   return 0;
 }
 
-
 int
 pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 {
-  int result = EBUSY;
+  pthread_descr self = thread_self();
+  pthread_readlock_info *existing;
+  int out_of_mem, have_lock_already;
+  int retval = EBUSY;
 
-  __pthread_lock (&rwlock->__rw_lock, NULL);
-  if (rwlock->__rw_writer == NULL
-      || (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
-	  && rwlock->__rw_readers != 0))
+  have_lock_already = rwlock_have_already(&self, rwlock,
+      &existing, &out_of_mem);
+
+  __pthread_lock (&rwlock->__rw_lock, self);
+
+  if (rwlock_can_rdlock(rwlock, have_lock_already))
     {
       ++rwlock->__rw_readers;
-      result = 0;
+      retval = 0;
     }
+
   __pthread_unlock (&rwlock->__rw_lock);
 
-  return result;
+  if (retval == 0)
+    {
+      if (have_lock_already || out_of_mem)
+	{
+	  if (existing != NULL)
+	    existing->pr_lock_count++;
+	  else
+	    self->p_untracked_readlock_count++;
+	}
+    }
+  
+  return retval;
 }
 
 
@@ -210,6 +392,28 @@
       __pthread_unlock (&rwlock->__rw_lock);
       if (th != NULL)
 	restart (th);
+
+      /* Recursive lock fixup */
+
+      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
+	{
+	  pthread_descr self = thread_self();
+	  pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);
+
+	  if (victim != NULL)
+	    {
+	      if (victim->pr_lock_count == 0)
+		{
+		  victim->pr_next = self->p_readlock_free;
+		  self->p_readlock_free = victim;
+		}
+	    }
+	  else
+	    {
+	      if (self->p_untracked_readlock_count > 0)
+		self->p_untracked_readlock_count--;
+	    }
+	}
     }
 
   return 0;
Index: linuxthreads/sysdeps/pthread/pthread.h
diff -u linuxthreads/sysdeps/pthread/pthread.h:1.1.1.1 linuxthreads/sysdeps/pthread/pthread.h:1.1.1.1.2.1
--- linuxthreads/sysdeps/pthread/pthread.h:1.1.1.1	Sun Jan  2 17:29:16 2000
+++ linuxthreads/sysdeps/pthread/pthread.h	Tue Jan 11 12:48:13 2000
@@ -97,6 +97,7 @@
 {
   PTHREAD_RWLOCK_PREFER_READER_NP,
   PTHREAD_RWLOCK_PREFER_WRITER_NP,
+  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,
   PTHREAD_RWLOCK_DEFAULT_NP = PTHREAD_RWLOCK_PREFER_WRITER_NP
 };
 #endif	/* Unix98 */
Index: linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
diff -u linuxthreads/sysdeps/pthread/bits/pthreadtypes.h:1.1.1.1 linuxthreads/sysdeps/pthread/bits/pthreadtypes.h:1.1.1.1.2.1
--- linuxthreads/sysdeps/pthread/bits/pthreadtypes.h:1.1.1.1	Sun Jan  2 17:29:16 2000
+++ linuxthreads/sysdeps/pthread/bits/pthreadtypes.h	Tue Jan 11 12:48:13 2000
@@ -95,7 +95,7 @@
 
 #ifdef __USE_UNIX98
 /* Read-write locks.  */
-typedef struct
+typedef struct _pthread_rwlock_t
 {
   struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
   int __rw_readers;                   /* Number of readers */


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]