Index: linuxthreads/internals.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/internals.h,v
retrieving revision 1.66
diff -u -p -r1.66 internals.h
--- internals.h	2001/04/23 20:03:15	1.66
+++ internals.h	2001/04/25 06:51:17
@@ -27,7 +27,8 @@
 #include <sys/types.h>
 #include <bits/libc-tsd.h> /* for _LIBC_TSD_KEY_N */
 
-extern long int testandset (int *spinlock);
+extern long int testandset (_lt_spinlock_t *spinlock);
+extern long int test (_lt_spinlock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include "pt-machine.h"
@@ -106,7 +107,7 @@ typedef struct _pthread_extricate_struct
 
 struct pthread_atomic {
   long p_count;
-  int p_spinlock;
+  _lt_spinlock_t p_spinlock;
 };
 
 /* Context info for read write locks. The pthread_rwlock_info structure
Index: linuxthreads/pt-machine.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/pt-machine.c,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.c
--- pt-machine.c	2000/12/27 17:15:20	1.2
+++ pt-machine.c	2001/04/25 06:51:17
@@ -19,7 +19,9 @@
 
 #define PT_EI
 
-extern long int testandset (int *spinlock);
+#include <pthread.h>
+
+extern long int testandset (_lt_spinlock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include <pt-machine.h>
Index: linuxthreads/rwlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/rwlock.c,v
retrieving revision 1.18
diff -u -p -r1.18 rwlock.c
--- rwlock.c	2000/12/27 17:16:24	1.18
+++ rwlock.c	2001/04/25 06:51:17
@@ -36,9 +36,9 @@ static int rwlock_rd_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
@@ -48,9 +48,9 @@ static int rwlock_wr_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
Index: linuxthreads/semaphore.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.c,v
retrieving revision 1.16
diff -u -p -r1.16 semaphore.c
--- semaphore.c	2001/04/10 21:12:00	1.16
+++ semaphore.c	2001/04/25 06:51:17
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshar
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock(&sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   return did_remove;
 }
@@ -66,10 +66,10 @@ int __new_sem_wait(sem_t * sem)
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
@@ -81,7 +81,7 @@ int __new_sem_wait(sem_t * sem)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -121,7 +121,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock(&sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -129,7 +129,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
   return retval;
 }
 
@@ -140,19 +140,19 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock(&sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_unlock(&sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
       th->p_sem_avail = 1;
       WRITE_MEMORY_BARRIER();
       restart(th);
@@ -214,17 +214,17 @@ int sem_timedwait(sem_t *sem, const stru
   int already_canceled = 0;
   int spurious_wakeup_count;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     --sem->__sem_value;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
     /* The standard requires that if the function would block and the
        time value is illegal, the function returns with an error.  */
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return EINVAL;
   }
 
@@ -241,7 +241,7 @@ int sem_timedwait(sem_t *sem, const stru
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -257,9 +257,9 @@ int sem_timedwait(sem_t *sem, const stru
 	/* __pthread_lock will queue back any spurious restarts that
 	   may happen to it. */
 
-	__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+	__pthread_lock(&sem->__sem_lock, self);
 	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-	__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+	__pthread_unlock(&sem->__sem_lock);
 
 	if (was_on_queue) {
 	  __pthread_set_own_extricate_if(self, 0);
Index: linuxthreads/semaphore.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.h,v
retrieving revision 1.10
diff -u -p -r1.10 semaphore.h
--- semaphore.h	2001/01/27 05:39:52	1.10
+++ semaphore.h	2001/04/25 06:51:17
@@ -31,11 +31,7 @@ typedef struct _pthread_descr_struct *_p
 /* System specific semaphore definition.  */
 typedef struct
 {
-  struct
-  {
-    long int __status;
-    int __spinlock;
-  } __sem_lock;
+  struct _pthread_fastlock __sem_lock;
   int __sem_value;
   _pthread_descr __sem_waiting;
 } sem_t;
Index: linuxthreads/spinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.c,v
retrieving revision 1.32
diff -u -p -r1.32 spinlock.c
--- spinlock.c	2001/02/19 18:47:27	1.32
+++ spinlock.c	2001/04/25 06:51:17
@@ -25,7 +25,7 @@
 #include "restart.h"
 
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static void __pthread_acquire(int * spinlock);
+static void __pthread_acquire(_lt_spinlock_t * spinlock);
 #endif
 
 
@@ -239,12 +239,12 @@ again:
 struct wait_node {
   struct wait_node *next;	/* Next node in null terminated linked list */
   pthread_descr thr;		/* The thread waiting with this node */
-  int abandoned;		/* Atomic flag */
+  _lt_spinlock_t abandoned;		/* Atomic flag */
 };
 
 static long wait_node_free_list;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static int wait_node_free_list_spinlock;
+static _lt_spinlock_t wait_node_free_list_spinlock;
 #endif
 
 /* Allocate a new node from the head of the free list using an atomic
@@ -272,7 +272,7 @@ static struct wait_node *wait_node_alloc
       wait_node_free_list = (long) new_node->next;
     }
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    wait_node_free_list_spinlock = __LT_SPINLOCK_INIT;
 
     if (new_node == 0)
       return malloc(sizeof *wait_node_alloc());
@@ -314,7 +314,7 @@ static void wait_node_free(struct wait_n
     wn->next = (struct wait_node *) wait_node_free_list;
     wait_node_free_list = (long) wn;
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    wait_node_free_list_spinlock = __LT_SPINLOCK_INIT;
     return;
   }
 #endif
@@ -387,7 +387,7 @@ void __pthread_alt_lock(struct _pthread_
       if (self == NULL)
 	self = thread_self();
 
-      wait_node.abandoned = 0;
+      wait_node.abandoned = __LT_SPINLOCK_INIT;
       wait_node.next = (struct wait_node *) lock->__status;
       wait_node.thr = self;
       lock->__status = (long) &wait_node;
@@ -414,7 +414,7 @@ void __pthread_alt_lock(struct _pthread_
       wait_node.thr = self;
       newstatus = (long) &wait_node;
     }
-    wait_node.abandoned = 0;
+    wait_node.abandoned = __LT_SPINLOCK_INIT;
     wait_node.next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -461,7 +461,7 @@ int __pthread_alt_timedlock(struct _pthr
       if (self == NULL)
 	self = thread_self();
 
-      p_wait_node->abandoned = 0;
+      p_wait_node->abandoned = __LT_SPINLOCK_INIT;
       p_wait_node->next = (struct wait_node *) lock->__status;
       p_wait_node->thr = self;
       lock->__status = (long) p_wait_node;
@@ -485,7 +485,7 @@ int __pthread_alt_timedlock(struct _pthr
       p_wait_node->thr = self;
       newstatus = (long) p_wait_node;
     }
-    p_wait_node->abandoned = 0;
+    p_wait_node->abandoned = __LT_SPINLOCK_INIT;
     p_wait_node->next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -579,7 +579,7 @@ void __pthread_alt_unlock(struct _pthrea
     while (p_node != (struct wait_node *) 1) {
       int prio;
 
-      if (p_node->abandoned) {
+      if (test(&p_node->abandoned)) {
 	/* Remove abandoned node. */
 #if defined TEST_FOR_COMPARE_AND_SWAP
 	if (!__pthread_has_cas)
@@ -665,7 +665,7 @@ int __pthread_has_cas = 0;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
 
 int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                               int * spinlock)
+                               _lt_spinlock_t * spinlock)
 {
   int res;
   if (testandset(spinlock)) __pthread_acquire(spinlock);
@@ -676,7 +676,7 @@ int __pthread_compare_and_swap(long * pt
   }
   /* Prevent reordering of store to *ptr above and store to *spinlock below */
   WRITE_MEMORY_BARRIER();
-  *spinlock = 0;
+  *spinlock = __LT_SPINLOCK_INIT;
   return res;
 }
 
@@ -701,7 +701,7 @@ int __pthread_compare_and_swap(long * pt
    - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
      sched_yield(), then sleeping again if needed. */
 
-static void __pthread_acquire(int * spinlock)
+static void __pthread_acquire(_lt_spinlock_t * spinlock)
 {
   int cnt = 0;
   struct timespec tm;
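
Note that __pthread_alt_unlock above now probes the abandoned flag through test() instead of reading the old int directly. This patch only supplies a test() implementation for hppa (see the pt-machine.h hunk further down), so other ports' pt-machine.h would presumably need an equivalent. A minimal sketch, assuming a port that keeps the usual "zero means free" encoding:

PT_EI long int
test (_lt_spinlock_t *spinlock)
{
  /* Plain read; "set" is any non-zero value on such ports (assumption,
     not part of this patch).  */
  return spinlock->lock != 0;
}
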
Index: linuxthreads/spinlock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.h,v
retrieving revision 1.22
diff -u -p -r1.22 spinlock.h
--- spinlock.h	2001/01/28 08:52:05	1.22
+++ spinlock.h	2001/04/25 06:51:17
@@ -37,10 +37,10 @@
 
 extern int __pthread_has_cas;
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      _lt_spinlock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   if (__builtin_expect (__pthread_has_cas, 1))
     return __compare_and_swap(ptr, oldval, newval);
@@ -58,7 +58,7 @@ static inline int compare_and_swap(long 
 
 static inline int
 compare_and_swap_with_release_semantics (long * ptr, long oldval,
-					 long newval, int * spinlock)
+					 long newval, _lt_spinlock_t * spinlock)
 {
   return __compare_and_swap_with_release_semantics (ptr, oldval,
 						    newval);
@@ -67,7 +67,7 @@ compare_and_swap_with_release_semantics 
 #endif
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   return __compare_and_swap(ptr, oldval, newval);
 }
@@ -75,10 +75,10 @@ static inline int compare_and_swap(long 
 #else
 
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      _lt_spinlock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
 }
Index: linuxthreads/sysdeps/hppa/pspinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pspinlock.c,v
retrieving revision 1.2
diff -u -p -r1.2 pspinlock.c
--- pspinlock.c	2000/12/27 17:17:17	1.2
+++ pspinlock.c	2001/04/25 06:51:17
@@ -21,18 +21,20 @@
 #include <pthread.h>
 #include "internals.h"
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+#define __ldcw(a) ({ \
+	unsigned __ret; \
+	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a) : "memory"); \
+	__ret; \
+})
+
 int
 __pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  do
-    asm volatile ("ldcw %1,%0"
-		  : "=r" (val), "=m" (*lock)
-		  : "m" (*lock));
-  while (!val);
+	while (__ldcw (lock) == 0)
+		while (*lock == 0) ;
 
-  return 0;
+	return 0;
 }
 weak_alias (__pthread_spin_lock, pthread_spin_lock)
 
@@ -40,11 +42,7 @@ weak_alias (__pthread_spin_lock, pthread
 int
 __pthread_spin_trylock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  asm volatile ("ldcw %1,%0"
-		: "=r" (val), "=m" (*lock)
-		: "m" (*lock));
+  unsigned int val = __ldcw (lock);
 
   return val ? 0 : EBUSY;
 }
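
ldcw atomically loads a word and stores zero back, returning the old contents, so a zero result means another CPU already holds the lock; the outer loop above retries the atomic operation while the inner loop spins on a plain load of *lock until it goes non-zero again. Releasing the lock is then just a store of the non-zero "free" value. The unlock path is not part of the hunk above; presumably it stays equivalent to:

int
__pthread_spin_unlock (pthread_spinlock_t *lock)
{
  /* Restore the non-zero "free" value so the next ldcw can succeed
     (sketch only, not taken from this patch).  */
  *lock = 1;
  return 0;
}
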
Index: linuxthreads/sysdeps/hppa/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pt-machine.h,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.h
--- pt-machine.h	2000/12/18 05:55:14	1.2
+++ pt-machine.h	2001/04/25 06:51:17
@@ -19,6 +19,7 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
+#include <sys/types.h>
 #include <bits/initspin.h>
 
 #ifndef PT_EI
@@ -35,11 +36,9 @@ register char * stack_pointer __asm__ ("
    load and clear, so hppa spinlocks must use zero to signify that
    someone is holding the lock.  */
 
-#define xstr(s) str(s)
-#define str(s) #s
 /* Spinlock implementation; required.  */
 PT_EI long int
-testandset (int *spinlock)
+testandset (_lt_spinlock_t *spinlock)
 {
   int ret;
 
@@ -50,5 +49,9 @@ testandset (int *spinlock)
 
   return ret == 0;
 }
-#undef str
-#undef xstr
+
+PT_EI long int
+test (_lt_spinlock_t *spinlock)
+{
+  return spinlock->lock == 0;
+}
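
Despite the inverted encoding, the hppa primitives keep the generic caller convention: ldcw returns the previous contents, so ret == 0 means the word was already clear (held) and testandset reports "was set", while test() reports the lock as set when the word is zero. The compare-and-swap emulation in spinlock.c (hunks earlier in this patch) consumes the pair exactly as before; only the release value is platform specific:

/* Usage pattern, condensed from __pthread_compare_and_swap above.  */
if (testandset (spinlock))        /* non-zero return: lock already held */
  __pthread_acquire (spinlock);   /* yield/sleep until it can be taken */
/* ... update *ptr ... */
WRITE_MEMORY_BARRIER ();
*spinlock = __LT_SPINLOCK_INIT;   /* release: 0 generically, 1 on hppa */
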
Index: linuxthreads/sysdeps/pthread/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:31	1.2
+++ initspin.h	2001/04/25 06:51:17
@@ -20,9 +20,8 @@
 /* Initial value of a spinlock.  Most platforms should use zero,
    unless they only implement a "test and clear" operation instead of
    the usual "test and set". */
-#define __LT_SPINLOCK_INIT 0
+#define __LT_SPINLOCK_INIT  ((_lt_spinlock_t) { 0 })
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { 0, { 0 } }
+#define __ATOMIC_INITIALIZER { 0, { 0 } }
Index: linuxthreads/sysdeps/pthread/bits/libc-lock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h,v
retrieving revision 1.17
diff -u -p -r1.17 libc-lock.h
--- libc-lock.h	2001/01/28 16:39:07	1.17
+++ libc-lock.h	2001/04/25 06:51:17
@@ -55,7 +55,7 @@ typedef pthread_key_t __libc_key_t;
    initialized locks must be set to one due to the lack of normal
    atomic operations.) */
 
-#if __LT_SPINLOCK_INIT == 0
+#if 0 /* __LT_SPINLOCK_INIT == 0 */
 #  define __libc_lock_define_initialized(CLASS,NAME) \
   CLASS __libc_lock_t NAME;
 #else
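
__LT_SPINLOCK_INIT is now a compound literal, which works for the run-time release assignments in spinlock.c and dl-fptr.c but cannot be evaluated by the preprocessor, hence the disabled #if above; for the same reason the lock-initializer macros in bits/initspin.h spell out the braces instead of nesting the literal. Roughly (illustration only, using the generic definitions):

static struct _pthread_fastlock lock = __LOCK_INITIALIZER;  /* static init: { 0, { 0 } } */

static void
release_example (void)
{
  lock.__spinlock = __LT_SPINLOCK_INIT;   /* run-time store: (_lt_spinlock_t) { 0 } */
}
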
Index: linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h,v
retrieving revision 1.9
diff -u -p -r1.9 pthreadtypes.h
--- pthreadtypes.h	2001/01/27 06:26:13	1.9
+++ pthreadtypes.h	2001/04/25 06:51:17
@@ -22,11 +22,15 @@
 #define __need_schedparam
 #include <bits/sched.h>
 
+typedef struct {
+  int lock;
+} _lt_spinlock_t;
+
 /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
 struct _pthread_fastlock
 {
   long int __status;   /* "Free" or "taken" or head of waiting list */
-  int __spinlock;      /* Used by compare_and_swap emulation. Also,
+  _lt_spinlock_t __spinlock;  /* Used by compare_and_swap emulation. Also,
 			  adaptive SMP lock stores spin count here. */
 };
 
Index: linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:11	1.2
+++ initspin.h	2001/04/25 06:51:17
@@ -19,9 +19,8 @@
 
 /* Initial value of a spinlock.  PA-RISC only implements atomic load
    and clear so this must be non-zero. */
-#define __LT_SPINLOCK_INIT 1
+#define __LT_SPINLOCK_INIT  ((_lt_spinlock_t) { 1 })
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { { 1 }, 0 }
+#define __ATOMIC_INITIALIZER { 0, { 1 } }
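
On hppa the "free" value is 1, and the new hppa bits/pthreadtypes.h added at the end of this patch places the 16-byte-aligned spinlock word first in struct _pthread_fastlock, so the initializers differ from the generic file in both value and field order. For illustration:

struct _pthread_fastlock fl = { { 1 }, 0 };   /* __spinlock free (1), __status 0 */
struct pthread_atomic    at = { 0, { 1 } };   /* p_count 0, p_spinlock free (1) */
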
Index: sysdeps/hppa/dl-fptr.c
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/hppa/dl-fptr.c,v
retrieving revision 1.1
diff -u -p -r1.1 dl-fptr.c
--- dl-fptr.c	2000/10/15 03:18:05	1.1
+++ dl-fptr.c	2001/04/25 06:51:18
@@ -29,8 +29,7 @@
 #ifdef _LIBC_REENTRANT
 # include <pt-machine.h>
 
-/* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
-static int __hppa_fptr_lock = 1;
+static _lt_spinlock_t __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
 /* Because ld.so is now versioned, these functions can be in their own
@@ -126,9 +125,9 @@ __hppa_make_fptr (const struct link_map 
 
 found:
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.  Again, remember, zero means the lock is taken!  */
+  /* Release the lock. */
   if (mem == NULL)
-    __hppa_fptr_lock = 1;
+    __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
   /* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
@@ -181,7 +180,7 @@ _dl_unmap (struct link_map *map)
 
 #ifdef _LIBC_REENTRANT
   /* Release the lock. */
-  __hppa_fptr_lock = 1;
+  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 }
 
@@ -204,8 +203,8 @@ _dl_lookup_address (const void *address)
       }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.   */
-  __hppa_fptr_lock = 1;
+  /* Release the lock. */
+  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
   return addr;
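
The dl-fptr.c hunks only touch the release sites; the acquire sites are unchanged here and presumably keep spinning on testandset, which now takes an _lt_spinlock_t * and encodes "held" as zero on hppa. Something along the lines of:

#ifdef _LIBC_REENTRANT
  /* Presumed acquire pattern (not shown in the hunks above).  */
  while (testandset (&__hppa_fptr_lock))
    continue;
#endif
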
--- /dev/null	Tue Apr  3 14:19:17 2001
+++ linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h	Tue Apr 24 17:32:20 2001
@@ -0,0 +1,146 @@
+/* Linuxthreads - a simple clone()-based implementation of Posix        */
+/* threads for Linux.                                                   */
+/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
+/*                                                                      */
+/* This program is free software; you can redistribute it and/or        */
+/* modify it under the terms of the GNU Library General Public License  */
+/* as published by the Free Software Foundation; either version 2       */
+/* of the License, or (at your option) any later version.               */
+/*                                                                      */
+/* This program is distributed in the hope that it will be useful,      */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
+/* GNU Library General Public License for more details.                 */
+
+#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
+# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
+#endif
+
+#ifndef _BITS_PTHREADTYPES_H
+#define _BITS_PTHREADTYPES_H	1
+
+#define __need_schedparam
+#include <bits/sched.h>
+
+typedef struct {
+  int lock;
+} _lt_spinlock_t __attribute__((aligned (16)));
+
+/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
+struct _pthread_fastlock
+{
+  _lt_spinlock_t __spinlock;  /* Used by compare_and_swap emulation. Also,
+			  adaptive SMP lock stores spin count here. */
+  long int __status;   /* "Free" or "taken" or head of waiting list */
+};
+
+#ifndef _PTHREAD_DESCR_DEFINED
+/* Thread descriptors */
+typedef struct _pthread_descr_struct *_pthread_descr;
+# define _PTHREAD_DESCR_DEFINED
+#endif
+
+
+/* Attributes for threads.  */
+typedef struct __pthread_attr_s
+{
+  int __detachstate;
+  int __schedpolicy;
+  struct __sched_param __schedparam;
+  int __inheritsched;
+  int __scope;
+  size_t __guardsize;
+  int __stackaddr_set;
+  void *__stackaddr;
+  size_t __stacksize;
+} pthread_attr_t;
+
+
+/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER).  */
+typedef struct
+{
+  struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
+  _pthread_descr __c_waiting;        /* Threads waiting on this condition */
+} pthread_cond_t;
+
+
+/* Attribute for condition variables.  */
+typedef struct
+{
+  int __dummy;
+} pthread_condattr_t;
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER).  */
+/* (The layout is unnatural to maintain binary compatibility
+    with earlier releases of LinuxThreads.) */
+typedef struct
+{
+  int __m_reserved;               /* Reserved for future use */
+  int __m_count;                  /* Depth of recursive locking */
+  _pthread_descr __m_owner;       /* Owner thread (if recursive or errcheck) */
+  int __m_kind;                   /* Mutex kind: fast, recursive or errcheck */
+  struct _pthread_fastlock __m_lock; /* Underlying fast lock */
+} pthread_mutex_t;
+
+
+/* Attribute for mutex.  */
+typedef struct
+{
+  int __mutexkind;
+} pthread_mutexattr_t;
+
+
+/* Once-only execution */
+typedef int pthread_once_t;
+
+
+#ifdef __USE_UNIX98
+/* Read-write locks.  */
+typedef struct _pthread_rwlock_t
+{
+  struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
+  int __rw_readers;                   /* Number of readers */
+  _pthread_descr __rw_writer;         /* Identity of writer, or NULL if none */
+  _pthread_descr __rw_read_waiting;   /* Threads waiting for reading */
+  _pthread_descr __rw_write_waiting;  /* Threads waiting for writing */
+  int __rw_kind;                      /* Reader/Writer preference selection */
+  int __rw_pshared;                   /* Shared between processes or not */
+} pthread_rwlock_t;
+
+
+/* Attribute for read-write locks.  */
+typedef struct
+{
+  int __lockkind;
+  int __pshared;
+} pthread_rwlockattr_t;
+#endif
+
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type.  */
+typedef volatile int pthread_spinlock_t __attribute__((aligned (16)));
+
+/* POSIX barrier. */
+typedef struct {
+  struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
+  int __ba_required;                  /* Threads needed for completion */
+  int __ba_present;                   /* Threads waiting */
+  _pthread_descr __ba_waiting;        /* Queue of waiting threads */
+} pthread_barrier_t;
+
+/* barrier attribute */
+typedef struct {
+  int __pshared;
+} pthread_barrierattr_t;
+
+#endif
+
+
+/* Thread identifiers */
+typedef unsigned long int pthread_t;
+
+#endif	/* bits/pthreadtypes.h */
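
The aligned(16) attributes on _lt_spinlock_t and pthread_spinlock_t in this new file exist because ldcw requires a 16-byte-aligned operand, which is presumably also why the hppa struct _pthread_fastlock places __spinlock first. A hypothetical compile-time check of that assumption:

/* Not part of the patch: fails to compile if the typedef loses the
   16-byte alignment that ldcw requires.  */
typedef char __lt_spinlock_align_check[__alignof__ (_lt_spinlock_t) == 16 ? 1 : -1];
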
