? linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h
Index: linuxthreads/attr.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/attr.c,v
retrieving revision 1.22
diff -u -p -r1.22 attr.c
--- attr.c	2001/03/27 03:19:51	1.22
+++ attr.c	2001/04/28 00:19:35
@@ -298,7 +298,7 @@ int pthread_getattr_np (pthread_t thread
 #ifndef _STACK_GROWS_UP
   attr->__stackaddr = (char *)(descr + 1);
 #else
-# error __stackaddr not handled
+  attr->__stackaddr = (char *)descr;
 #endif
 
   return 0;
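
With an upward-growing stack the descriptor sits at the low end of the
thread's window and the usable stack begins directly above it, so the
descriptor address itself is the stack address; with a downward-growing
stack the stack ends where the descriptor begins, hence descr + 1.
Roughly (illustrative only, window size arbitrary):

/* _STACK_GROWS_DOWN                 _STACK_GROWS_UP (e.g. hppa)
   high | descriptor   |             high |   (free)     |
        | stack grows  |                  | stack grows  |
        |    down      |                  |     up       |
   low  |   (free)     |             low  | descriptor   |
   __stackaddr = descr + 1           __stackaddr = descr           */
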
Index: linuxthreads/internals.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/internals.h,v
retrieving revision 1.66
diff -u -p -r1.66 internals.h
--- internals.h	2001/04/23 20:03:15	1.66
+++ internals.h	2001/04/28 00:19:35
@@ -27,7 +27,8 @@
 #include <sys/types.h>
 #include <bits/libc-tsd.h> /* for _LIBC_TSD_KEY_N */
 
-extern long int testandset (int *spinlock);
+extern int try_lock(__atomic_lock_t *spinlock);
+extern int lock_held(__atomic_lock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include "pt-machine.h"
@@ -106,7 +107,7 @@ typedef struct _pthread_extricate_struct
 
 struct pthread_atomic {
   long p_count;
-  int p_spinlock;
+  __atomic_lock_t p_spinlock;
 };
 
 /* Context info for read write locks. The pthread_rwlock_info structure
@@ -384,7 +385,11 @@ static inline pthread_descr thread_self 
   else if (__pthread_nonstandard_stacks)
     return __pthread_find_self();
   else
+#ifdef _STACK_GROWS_DOWN
     return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
+#else
+    return (pthread_descr)((unsigned long)sp &~ (STACK_SIZE-1));
+#endif
 #endif
 }
 
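
Both branches of thread_self() recover the descriptor from nothing but
the stack pointer, relying on every thread stack occupying its own
STACK_SIZE-aligned window.  A stand-alone sketch of the two
computations (hypothetical helper, not part of the patch; STACK_SIZE
must be a power of two):

#include <stdint.h>

#define STACK_SIZE 0x20000	/* assumed default */

/* Stacks grow down: round sp up to the top of its window; the
   descriptor occupies the bytes just below that boundary.  */
static uintptr_t
descr_top (uintptr_t sp)
{
  return (sp | (STACK_SIZE - 1)) + 1;
}

/* Stacks grow up: the descriptor sits at the very bottom of the
   window.  */
static uintptr_t
descr_bottom (uintptr_t sp)
{
  return sp & ~(uintptr_t) (STACK_SIZE - 1);
}

For example, sp = 0x40012345 yields 0x40020000 (top) and 0x40000000
(bottom) respectively.
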
Index: linuxthreads/manager.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/manager.c,v
retrieving revision 1.69
diff -u -p -r1.69 manager.c
--- manager.c	2001/04/23 18:50:30	1.69
+++ manager.c	2001/04/28 00:19:35
@@ -401,7 +401,41 @@ static int pthread_allocate_stack(const 
 
       guardaddr = new_thread_bottom + stacksize/2;
       /* We leave the guard area in the middle unmapped.	*/
-#else  /* !NEED_SEPARATE_REGISTER_STACK */
+#elif defined (_STACK_GROWS_UP)
+
+      /* The thread descriptor goes at the bottom of this area, and
+       * the stack starts directly above it.  We leave the last page
+       * of data unmapped to prevent a rogue thread from overwriting
+       * another thread's stack data.
+       */
+      if (attr != NULL)
+	{
+	  guardsize = page_roundup (attr->__guardsize, granularity);
+	  stacksize = STACK_SIZE - guardsize;
+	  stacksize = MIN (stacksize,
+			   page_roundup (attr->__stacksize, granularity));
+	}
+      else
+	{
+	  guardsize = granularity;
+	  stacksize = STACK_SIZE - granularity;
+	}
+
+      new_thread = (pthread_descr)((unsigned long)default_new_thread
+				   &~ (STACK_SIZE - 1));
+      map_addr = mmap(new_thread, stacksize + guardsize,
+		      PROT_READ | PROT_WRITE | PROT_EXEC,
+		      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      if (map_addr == MAP_FAILED)
+	{
+	  return -1;
+	}
+
+      new_thread_bottom = map_addr + sizeof(*new_thread);
+      guardaddr = map_addr + stacksize;
+      guardsize = granularity;
+
+#else  /* !NEED_SEPARATE_REGISTER_STACK && !_STACK_GROWS_UP */
 # if FLOATING_STACKS
       if (attr != NULL)
 	{
@@ -591,6 +625,10 @@ static int pthread_handle_create(pthread
 			 (char *)new_thread - new_thread_bottom,
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 			 __pthread_sig_cancel, new_thread);
+#elif defined(_STACK_GROWS_UP)
+	  pid = __clone(pthread_start_thread_event, (void **) new_thread_bottom,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+			__pthread_sig_cancel, new_thread);
 #else
 	  pid = __clone(pthread_start_thread_event, (void **) new_thread,
 			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -627,6 +665,10 @@ static int pthread_handle_create(pthread
                      (char *)new_thread - new_thread_bottom,
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 		     __pthread_sig_cancel, new_thread);
+#elif defined(_STACK_GROWS_UP)
+      pid = __clone(pthread_start_thread, (void **) new_thread_bottom,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+		    __pthread_sig_cancel, new_thread);
 #else
       pid = __clone(pthread_start_thread, (void **) new_thread,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -643,6 +685,9 @@ static int pthread_handle_create(pthread
 			    - new_thread_bottom);
 	munmap((caddr_t)new_thread_bottom,
 	       2 * stacksize + new_thread->p_guardsize);
+#elif defined(_STACK_GROWS_UP)
+	size_t stacksize = guardaddr - (char *)(new_thread);
+	munmap(new_thread, stacksize + guardsize);
 #else
 	size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
 	munmap(new_thread_bottom - guardsize, guardsize + stacksize);
@@ -708,6 +753,10 @@ static void pthread_free(pthread_descr t
       size_t guardsize = th->p_guardsize;
       /* Free the stack and thread descriptor area */
       char *guardaddr = th->p_guardaddr;
+#ifdef _STACK_GROWS_UP
+      size_t stacksize = guardaddr - (char *)th;
+      guardaddr = (char *)th;
+#else
       /* Guardaddr is always set, even if guardsize is 0.  This allows
 	 us to compute everything else.  */
       size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
@@ -715,6 +764,7 @@ static void pthread_free(pthread_descr t
       /* Take account of the register stack, which is below guardaddr.  */
       guardaddr -= stacksize;
       stacksize *= 2;
+#endif
 #endif
       /* Unmap the stack.  */
       munmap(guardaddr, stacksize + guardsize);
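
The upward-growing free path mirrors the allocation above: the mapping
starts at the descriptor, guardaddr was set to map_addr + stacksize, so
guardaddr - (char *)th recovers stacksize and munmap() releases exactly
the region that pthread_allocate_stack() mapped.  A minimal check of
that arithmetic (hypothetical sizes; not part of the patch):

#include <assert.h>
#include <stddef.h>

static char region[0x20000];		/* stand-in for the mmap'ed window */

int
main (void)
{
  const size_t granularity = 0x1000;	/* assumed page size */
  char *map_addr = region;		/* == new_thread == th */
  size_t guardsize = granularity;
  size_t stacksize = sizeof region - guardsize;	/* default-attr case */
  char *guardaddr = map_addr + stacksize;	/* as set at allocation */

  /* pthread_free() recomputes the extent from the descriptor: */
  size_t freed = (size_t) (guardaddr - map_addr) + guardsize;
  assert (freed == stacksize + guardsize);	/* whole mapping freed */
  return 0;
}
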
Index: linuxthreads/oldsemaphore.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/oldsemaphore.c,v
retrieving revision 1.10
diff -u -p -r1.10 oldsemaphore.c
--- oldsemaphore.c	2001/04/10 21:09:57	1.10
+++ oldsemaphore.c	2001/04/28 00:19:35
@@ -73,7 +73,7 @@ int __old_sem_init(old_sem_t *sem, int p
 	errno = ENOSYS;
 	return -1;
     }
-  sem->sem_spinlock = __LT_SPINLOCK_INIT;
+  sem->sem_spinlock = __ATOMIC_LOCK_INIT;
   sem->sem_status = ((long)value << 1) + 1;
   return 0;
 }
Index: linuxthreads/pt-machine.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/pt-machine.c,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.c
--- pt-machine.c	2000/12/27 17:15:20	1.2
+++ pt-machine.c	2001/04/28 00:19:35
@@ -19,7 +19,9 @@
 
 #define PT_EI
 
-extern long int testandset (int *spinlock);
+#include <pthread.h>
+
+extern int try_lock(__atomic_lock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include <pt-machine.h>
Index: linuxthreads/pthread.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/pthread.c,v
retrieving revision 1.79
diff -u -p -r1.79 pthread.c
--- pthread.c	2001/04/23 18:50:30	1.79
+++ pthread.c	2001/04/28 00:19:36
@@ -412,11 +412,17 @@ static void pthread_initialize(void)
   /* Test if compare-and-swap is available */
   __pthread_has_cas = compare_and_swap_is_available();
 #endif
+#ifdef _STACK_GROWS_UP
+  /* The initial thread already has all the stack it needs.  */
+  __pthread_initial_thread_bos = (char *)
+    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
+#else
   /* For the initial stack, reserve at least STACK_SIZE bytes of stack
      below the current stack address, and align that on a
      STACK_SIZE boundary. */
   __pthread_initial_thread_bos =
     (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
+#endif
   /* Update the descriptor for the initial thread. */
   __pthread_initial_thread.p_pid = __getpid();
   /* Likewise for the resolver state _res.  */
@@ -544,6 +550,11 @@ int __pthread_initialize_manager(void)
 			 THREAD_MANAGER_STACK_SIZE,
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 			 (void *)(long)manager_pipe[0]);
+#elif defined(_STACK_GROWS_UP)
+	  pid = __clone(__pthread_manager_event,
+			(void **) __pthread_manager_thread_bos,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+			(void *)(long)manager_pipe[0]);
 #else
 	  pid = __clone(__pthread_manager_event,
 			(void **) __pthread_manager_thread_tos,
@@ -580,6 +591,10 @@ int __pthread_initialize_manager(void)
 		     THREAD_MANAGER_STACK_SIZE,
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 		     (void *)(long)manager_pipe[0]);
+#elif defined(_STACK_GROWS_UP)
+      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+		    (void *)(long)manager_pipe[0]);
 #else
       pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
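
All of the __clone() call sites here and in manager.c follow one rule:
hand the child the end of its stack that the stack pointer moves away
from.  A tiny sketch of the rule (hypothetical helper, not in the
patch):

static inline void *
clone_stack_arg (void *bos, void *tos)
{
#ifdef _STACK_GROWS_UP
  return bos;	/* sp starts at the bottom and moves up */
#else
  return tos;	/* sp starts at the top and moves down */
#endif
}
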
Index: linuxthreads/rwlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/rwlock.c,v
retrieving revision 1.18
diff -u -p -r1.18 rwlock.c
--- rwlock.c	2000/12/27 17:16:24	1.18
+++ rwlock.c	2001/04/28 00:19:36
@@ -36,9 +36,9 @@ static int rwlock_rd_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
@@ -48,9 +48,9 @@ static int rwlock_wr_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
Index: linuxthreads/semaphore.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.c,v
retrieving revision 1.16
diff -u -p -r1.16 semaphore.c
--- semaphore.c	2001/04/10 21:12:00	1.16
+++ semaphore.c	2001/04/28 00:19:36
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshar
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock(&sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   return did_remove;
 }
@@ -66,10 +66,10 @@ int __new_sem_wait(sem_t * sem)
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
@@ -81,7 +81,7 @@ int __new_sem_wait(sem_t * sem)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -121,7 +121,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock(&sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -129,7 +129,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
   return retval;
 }
 
@@ -140,19 +140,19 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock(&sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_unlock(&sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
       th->p_sem_avail = 1;
       WRITE_MEMORY_BARRIER();
       restart(th);
@@ -214,17 +214,17 @@ int sem_timedwait(sem_t *sem, const stru
   int already_canceled = 0;
   int spurious_wakeup_count;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     --sem->__sem_value;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
     /* The standard requires that if the function would block and the
        time value is illegal, the function returns with an error.  */
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return EINVAL;
   }
 
@@ -241,7 +241,7 @@ int sem_timedwait(sem_t *sem, const stru
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -257,9 +257,9 @@ int sem_timedwait(sem_t *sem, const stru
 	/* __pthread_lock will queue back any spurious restarts that
 	   may happen to it. */
 
-	__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+	__pthread_lock(&sem->__sem_lock, self);
 	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-	__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+	__pthread_unlock(&sem->__sem_lock);
 
 	if (was_on_queue) {
 	  __pthread_set_own_extricate_if(self, 0);
Index: linuxthreads/semaphore.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.h,v
retrieving revision 1.10
diff -u -p -r1.10 semaphore.h
--- semaphore.h	2001/01/27 05:39:52	1.10
+++ semaphore.h	2001/04/28 00:19:36
@@ -31,11 +31,7 @@ typedef struct _pthread_descr_struct *_p
 /* System specific semaphore definition.  */
 typedef struct
 {
-  struct
-  {
-    long int __status;
-    int __spinlock;
-  } __sem_lock;
+  struct _pthread_fastlock __sem_lock;
   int __sem_value;
   _pthread_descr __sem_waiting;
 } sem_t;
Index: linuxthreads/spinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.c,v
retrieving revision 1.32
diff -u -p -r1.32 spinlock.c
--- spinlock.c	2001/02/19 18:47:27	1.32
+++ spinlock.c	2001/04/28 00:19:36
@@ -25,7 +25,7 @@
 #include "restart.h"
 
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static void __pthread_acquire(int * spinlock);
+static void __pthread_acquire(__atomic_lock_t * spinlock);
 #endif
 
 
@@ -156,7 +156,7 @@ int __pthread_unlock(struct _pthread_fas
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
   {
     WRITE_MEMORY_BARRIER();
-    lock->__spinlock = __LT_SPINLOCK_INIT;
+    release_lock(&lock->__spinlock);
     return 0;
   }
 #endif
@@ -239,12 +239,12 @@ again:
 struct wait_node {
   struct wait_node *next;	/* Next node in null terminated linked list */
   pthread_descr thr;		/* The thread waiting with this node */
-  int abandoned;		/* Atomic flag */
+  __atomic_lock_t abandoned;		/* Atomic flag */
 };
 
 static long wait_node_free_list;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static int wait_node_free_list_spinlock;
+static __atomic_lock_t wait_node_free_list_spinlock;
 #endif
 
 /* Allocate a new node from the head of the free list using an atomic
@@ -272,7 +272,7 @@ static struct wait_node *wait_node_alloc
       wait_node_free_list = (long) new_node->next;
     }
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    release_lock(&wait_node_free_list_spinlock);
 
     if (new_node == 0)
       return malloc(sizeof *wait_node_alloc());
@@ -314,7 +314,7 @@ static void wait_node_free(struct wait_n
     wn->next = (struct wait_node *) wait_node_free_list;
     wait_node_free_list = (long) wn;
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    release_lock(&wait_node_free_list_spinlock);
     return;
   }
 #endif
@@ -387,7 +387,7 @@ void __pthread_alt_lock(struct _pthread_
       if (self == NULL)
 	self = thread_self();
 
-      wait_node.abandoned = 0;
+      release_lock(&wait_node.abandoned);
       wait_node.next = (struct wait_node *) lock->__status;
       wait_node.thr = self;
       lock->__status = (long) &wait_node;
@@ -395,7 +395,7 @@ void __pthread_alt_lock(struct _pthread_
     }
 
     WRITE_MEMORY_BARRIER();
-    lock->__spinlock = __LT_SPINLOCK_INIT;
+    release_lock(&lock->__spinlock);
 
     if (suspend_needed)
       suspend (self);
@@ -414,7 +414,7 @@ void __pthread_alt_lock(struct _pthread_
       wait_node.thr = self;
       newstatus = (long) &wait_node;
     }
-    wait_node.abandoned = 0;
+    release_lock(&wait_node.abandoned);
     wait_node.next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -461,7 +461,7 @@ int __pthread_alt_timedlock(struct _pthr
       if (self == NULL)
 	self = thread_self();
 
-      p_wait_node->abandoned = 0;
+      release_lock(&p_wait_node->abandoned);
       p_wait_node->next = (struct wait_node *) lock->__status;
       p_wait_node->thr = self;
       lock->__status = (long) p_wait_node;
@@ -469,7 +469,7 @@ int __pthread_alt_timedlock(struct _pthr
     }
 
     WRITE_MEMORY_BARRIER();
-    lock->__spinlock = __LT_SPINLOCK_INIT;
+    release_lock(&lock->__spinlock);
     goto suspend;
   }
 #endif
@@ -485,7 +485,7 @@ int __pthread_alt_timedlock(struct _pthr
       p_wait_node->thr = self;
       newstatus = (long) p_wait_node;
     }
-    p_wait_node->abandoned = 0;
+    release_lock(&p_wait_node->abandoned);
     p_wait_node->next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -506,7 +506,7 @@ int __pthread_alt_timedlock(struct _pthr
 
   if (oldstatus != 0) {
     if (timedsuspend(self, abstime) == 0) {
-      if (!testandset(&p_wait_node->abandoned))
+      if (!try_lock(&p_wait_node->abandoned))
 	return 0; /* Timeout! */
 
       /* Eat outstanding resume from owner, otherwise wait_node_free() below
@@ -579,7 +579,7 @@ void __pthread_alt_unlock(struct _pthrea
     while (p_node != (struct wait_node *) 1) {
       int prio;
 
-      if (p_node->abandoned) {
+      if (lock_held(&p_node->abandoned)) {
 	/* Remove abandoned node. */
 #if defined TEST_FOR_COMPARE_AND_SWAP
 	if (!__pthread_has_cas)
@@ -625,7 +625,7 @@ void __pthread_alt_unlock(struct _pthrea
        thread timed out and abandoned the node in which case we repeat the
        whole unlock operation. */
 
-    if (!testandset(&p_max_prio->abandoned)) {
+    if (!try_lock(&p_max_prio->abandoned)) {
 #if defined TEST_FOR_COMPARE_AND_SWAP
       if (!__pthread_has_cas)
 #endif
@@ -650,7 +650,7 @@ void __pthread_alt_unlock(struct _pthrea
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
   {
     WRITE_MEMORY_BARRIER();
-    lock->__spinlock = __LT_SPINLOCK_INIT;
+    release_lock(&lock->__spinlock);
   }
 #endif
 }
@@ -665,10 +665,10 @@ int __pthread_has_cas = 0;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
 
 int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                               int * spinlock)
+                               __atomic_lock_t * spinlock)
 {
   int res;
-  if (testandset(spinlock)) __pthread_acquire(spinlock);
+  if (try_lock(spinlock)) __pthread_acquire(spinlock);
   if (*ptr == oldval) {
     *ptr = newval; res = 1;
   } else {
@@ -676,7 +676,7 @@ int __pthread_compare_and_swap(long * pt
   }
   /* Prevent reordering of store to *ptr above and store to *spinlock below */
   WRITE_MEMORY_BARRIER();
-  *spinlock = 0;
+  release_lock(spinlock);
   return res;
 }
 
@@ -701,12 +701,12 @@ int __pthread_compare_and_swap(long * pt
    - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
      sched_yield(), then sleeping again if needed. */
 
-static void __pthread_acquire(int * spinlock)
+static void __pthread_acquire(__atomic_lock_t * spinlock)
 {
   int cnt = 0;
   struct timespec tm;
 
-  while (testandset(spinlock)) {
+  while (try_lock(spinlock)) {
     if (cnt < MAX_SPIN_COUNT) {
       sched_yield();
       cnt++;
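
With the renaming done, the emulation paths in this file touch a lock
through exactly three operations: try_lock() (returns 0 iff the lock
was free and is now ours), lock_held(), and release_lock().  A minimal
sketch of that contract for a conventional zero-is-free port, using the
GCC __sync builtins as a stand-in for the per-arch asm (hppa keeps the
same return conventions but inverts the stored values):

typedef int sketch_lock_t;	/* generic ports: 0 == free */

static int
sketch_try_lock (sketch_lock_t *p)
{
  /* Previous value: 0 means we just acquired the lock, nonzero
     means somebody already held it.  */
  return __sync_lock_test_and_set (p, 1);
}

static int
sketch_lock_held (sketch_lock_t *p)
{
  return *p;			/* nonzero while held */
}

static void
sketch_release_lock (sketch_lock_t *p)
{
  __sync_lock_release (p);	/* store the port's "free" value */
}
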
Index: linuxthreads/spinlock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.h,v
retrieving revision 1.22
diff -u -p -r1.22 spinlock.h
--- spinlock.h	2001/01/28 08:52:05	1.22
+++ spinlock.h	2001/04/28 00:19:36
@@ -14,6 +14,7 @@
 
 #include <bits/initspin.h>
 
+#define release_lock(p) (*(p) = __ATOMIC_LOCK_INIT)
 
 /* There are 2 compare and swap synchronization primitives with
    different semantics:
@@ -37,10 +38,10 @@
 
 extern int __pthread_has_cas;
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      __atomic_lock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   __atomic_lock_t * spinlock)
 {
   if (__builtin_expect (__pthread_has_cas, 1))
     return __compare_and_swap(ptr, oldval, newval);
@@ -50,15 +51,11 @@ static inline int compare_and_swap(long 
 
 #elif defined(HAS_COMPARE_AND_SWAP)
 
-#ifdef IMPLEMENT_TAS_WITH_CAS
-#define testandset(p) !__compare_and_swap((long int *) p, 0, 1)
-#endif
-
 #ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
 
 static inline int
 compare_and_swap_with_release_semantics (long * ptr, long oldval,
-					 long newval, int * spinlock)
+					 long newval, __atomic_lock_t * spinlock)
 {
   return __compare_and_swap_with_release_semantics (ptr, oldval,
 						    newval);
@@ -67,7 +64,7 @@ compare_and_swap_with_release_semantics 
 #endif
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   __atomic_lock_t * spinlock)
 {
   return __compare_and_swap(ptr, oldval, newval);
 }
@@ -75,10 +72,10 @@ static inline int compare_and_swap(long 
 #else
 
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      __atomic_lock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   __atomic_lock_t * spinlock)
 {
   return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
 }
@@ -99,7 +96,7 @@ extern int __pthread_unlock(struct _pthr
 static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
 {
   lock->__status = 0;
-  lock->__spinlock = __LT_SPINLOCK_INIT;
+  lock->__spinlock = __ATOMIC_LOCK_INIT;
 }
 
 static inline int __pthread_trylock (struct _pthread_fastlock * lock)
@@ -113,7 +110,7 @@ static inline int __pthread_trylock (str
 #endif
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
   {
-    return (testandset(&lock->__spinlock) ? EBUSY : 0);
+    return (try_lock(&lock->__spinlock) ? EBUSY : 0);
   }
 #endif
 
@@ -141,7 +138,7 @@ extern void __pthread_alt_unlock(struct 
 static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
 {
   lock->__status = 0;
-  lock->__spinlock = __LT_SPINLOCK_INIT;
+  lock->__spinlock = __ATOMIC_LOCK_INIT;
 }
 
 static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
@@ -157,7 +154,7 @@ static inline int __pthread_alt_trylock 
   {
     int res = EBUSY;
 
-    if (testandset(&lock->__spinlock) == 0)
+    if (try_lock(&lock->__spinlock) == 0)
       {
 	if (lock->__status == 0)
 	  {
@@ -165,7 +162,7 @@ static inline int __pthread_alt_trylock 
 	    WRITE_MEMORY_BARRIER();
 	    res = 0;
 	  }
-	lock->__spinlock = __LT_SPINLOCK_INIT;
+	release_lock(&lock->__spinlock);
       }
     return res;
   }
Index: linuxthreads/sysdeps/alpha/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/alpha/pt-machine.h,v
retrieving revision 1.6
diff -u -p -r1.6 pt-machine.h
--- pt-machine.h	2000/04/15 16:49:47	1.6
+++ pt-machine.h	2001/04/28 00:19:36
@@ -39,8 +39,8 @@ register char *stack_pointer __asm__("$3
 
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (__atomic_lock_t *spinlock)
 {
   long int ret, temp;
 
@@ -58,9 +58,10 @@ testandset (int *spinlock)
 	: "m"(*spinlock)
         : "memory");
 
-  return ret;
+  return (int) ret;
 }
 
+#define lock_held(p) (*(p))
 
 /* Begin allocating thread stacks at this address.  Default is to allocate
    them just below the initial program stack.  */
Index: linuxthreads/sysdeps/arm/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/arm/pt-machine.h,v
retrieving revision 1.3
diff -u -p -r1.3 pt-machine.h
--- pt-machine.h	2000/12/18 05:55:14	1.3
+++ pt-machine.h	2001/04/28 00:19:36
@@ -29,8 +29,8 @@
    time; let's hope nobody tries to use one.  */
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
   register unsigned int ret;
 
@@ -41,6 +41,7 @@ testandset (int *spinlock)
   return ret;
 }
 
+#define lock_held(p) (*(p))
 
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.  */
Index: linuxthreads/sysdeps/cris/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/cris/pt-machine.h,v
retrieving revision 1.1
diff -u -p -r1.1 pt-machine.h
--- pt-machine.h	2001/04/09 04:52:20	1.1
+++ pt-machine.h	2001/04/28 00:19:36
@@ -22,10 +22,10 @@
 # define PT_EI extern inline
 #endif
 
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
-  register unsigned long int ret;
+  register unsigned int ret;
 
   /* Note the use of a dummy output of *spinlock to expose the write.  The
      memory barrier is to stop *other* writes being moved past this code.  */
@@ -42,6 +42,7 @@ testandset (int *spinlock)
   return ret;
 }
 
+#define lock_held(p) (*(p))
 
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.
Index: linuxthreads/sysdeps/hppa/pspinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pspinlock.c,v
retrieving revision 1.2
diff -u -p -r1.2 pspinlock.c
--- pspinlock.c	2000/12/27 17:17:17	1.2
+++ pspinlock.c	2001/04/28 00:19:40
@@ -21,18 +21,20 @@
 #include <pthread.h>
 #include "internals.h"
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+#define __ldcw(a) ({ \
+	unsigned int __ret; \
+	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a) : "memory"); \
+	__ret; \
+})
+
 int
 __pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  do
-    asm volatile ("ldcw %1,%0"
-		  : "=r" (val), "=m" (*lock)
-		  : "m" (*lock));
-  while (!val);
+	while (__ldcw (lock) == 0)
+		while (*lock == 0) ;
 
-  return 0;
+	return 0;
 }
 weak_alias (__pthread_spin_lock, pthread_spin_lock)
 
@@ -40,11 +42,7 @@ weak_alias (__pthread_spin_lock, pthread
 int
 __pthread_spin_trylock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  asm volatile ("ldcw %1,%0"
-		: "=r" (val), "=m" (*lock)
-		: "m" (*lock));
+  unsigned int val = __ldcw (lock);
 
   return val ? 0 : EBUSY;
 }
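
ldcw atomically loads a word and stores zero, so the only value an
acquire can ever write is 0; that is what forces the PA-RISC convention
of 1 == free, 0 == held used throughout this patch.  A plain-C model of
its effect (illustrative only; the real thing is a single atomic
instruction, and the word must be suitably aligned):

static unsigned int
ldcw_model (unsigned int *a)
{
  unsigned int old = *a;	/* load ...                        */
  *a = 0;			/* ... and clear, atomically on hw */
  return old;
}

The inner read-only loop in __pthread_spin_lock() spins on a plain load
and only retries the ldcw once the word looks free again, which keeps
the lock's cache line from being written on every iteration.
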
Index: linuxthreads/sysdeps/hppa/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pt-machine.h,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.h
--- pt-machine.h	2000/12/18 05:55:14	1.2
+++ pt-machine.h	2001/04/28 00:19:40
@@ -19,6 +19,7 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
+#include <sys/types.h>
 #include <bits/initspin.h>
 
 #ifndef PT_EI
@@ -30,16 +31,15 @@
 #define CURRENT_STACK_FRAME  stack_pointer
 register char * stack_pointer __asm__ ("%r30");
 
+#define release_lock(p) (*(p) = __ATOMIC_LOCK_INIT)
 
 /* The hppa only has one atomic read and modify memory operation,
    load and clear, so hppa spinlocks must use zero to signify that
    someone is holding the lock.  */
 
-#define xstr(s) str(s)
-#define str(s) #s
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
   int ret;
 
@@ -50,5 +50,9 @@ testandset (int *spinlock)
 
   return ret == 0;
 }
-#undef str
-#undef xstr
+
+PT_EI int
+lock_held(__atomic_lock_t *spinlock)
+{
+	return spinlock->lock == 0;
+}
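
The hppa __atomic_lock_t itself comes from the new, still-unversioned
linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h (the `?'
line at the very top of this diff).  Judging from spinlock->lock above
and __ATOMIC_LOCK_INIT later in the diff, it is presumably something
along these lines; note that ldcw also wants its word 16-byte aligned:

typedef struct {
  int lock;
} __attribute__ ((__aligned__ (16))) __atomic_lock_t;
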
Index: linuxthreads/sysdeps/i386/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/i386/pt-machine.h,v
retrieving revision 1.10
diff -u -p -r1.10 pt-machine.h
--- pt-machine.h	2001/04/12 19:43:07	1.10
+++ pt-machine.h	2001/04/28 00:19:40
@@ -29,10 +29,10 @@
 
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
-  long int ret;
+  int ret;
 
   __asm__ __volatile__(
        "xchgl %0, %1"
@@ -43,6 +43,7 @@ testandset (int *spinlock)
   return ret;
 }
 
+#define lock_held(p) (*(p))
 
 /* Compare-and-swap for semaphores.
    Available on the 486 and above, but not on the 386.
Index: linuxthreads/sysdeps/ia64/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/ia64/pt-machine.h,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.h
--- pt-machine.h	2000/12/18 22:35:59	1.2
+++ pt-machine.h	2001/04/28 00:19:40
@@ -91,8 +91,8 @@ __compare_and_swap_with_release_semantic
 #endif /* ELF_MACHINE_NAME */
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
   long int ret;
 
@@ -102,5 +102,7 @@ testandset (int *spinlock)
        : "r"(1), "1"(__atomic_fool_gcc (spinlock))
        : "memory");
 
-  return ret;
+  return (int) ret;
 }
+
+#define lock_held(p) (*(p))
Index: linuxthreads/sysdeps/m68k/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/m68k/pt-machine.h,v
retrieving revision 1.3
diff -u -p -r1.3 pt-machine.h
--- pt-machine.h	2000/12/18 05:55:14	1.3
+++ pt-machine.h	2001/04/28 00:19:40
@@ -25,8 +25,8 @@
 
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
   char ret;
 
@@ -38,6 +38,7 @@ testandset (int *spinlock)
   return ret;
 }
 
+#define lock_held(p) (*(p))
 
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.  */
Index: linuxthreads/sysdeps/mips/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/mips/pt-machine.h,v
retrieving revision 1.6
diff -u -p -r1.6 pt-machine.h
--- pt-machine.h	2000/12/05 18:00:31	1.6
+++ pt-machine.h	2001/04/28 00:19:40
@@ -35,10 +35,10 @@
 
 #if (_MIPS_ISA >= _MIPS_ISA_MIPS2)
 
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
-  long int ret, temp;
+  int ret, temp;
 
   __asm__ __volatile__
     ("/* Inline spinlock test & set */\n\t"
@@ -69,6 +69,7 @@ testandset (int *spinlock)
 }
 #endif /* !(_MIPS_ISA >= _MIPS_ISA_MIPS2) */
 
+#define lock_held(p) (*(p))
 
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.  */
Index: linuxthreads/sysdeps/powerpc/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/powerpc/pt-machine.h,v
retrieving revision 1.7
diff -u -p -r1.7 pt-machine.h
--- pt-machine.h	2000/07/22 02:24:23	1.7
+++ pt-machine.h	2001/04/28 00:19:40
@@ -39,7 +39,13 @@ register char * stack_pointer __asm__ ("
 /* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
 
 #define HAS_COMPARE_AND_SWAP
-#define IMPLEMENT_TAS_WITH_CAS
+
+static inline int try_lock(__atomic_lock_t *p)
+{
+  return !__compare_and_swap((long int *) p, 0, 1);
+}
+
+#define lock_held(p) (*(p))
 
 #if BROKEN_PPC_ASM_CR0
 static
Index: linuxthreads/sysdeps/pthread/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:31	1.2
+++ initspin.h	2001/04/28 00:19:40
@@ -20,9 +20,8 @@
 /* Initial value of a spinlock.  Most platforms should use zero,
    unless they only implement a "test and clear" operation instead of
    the usual "test and set". */
-#define __LT_SPINLOCK_INIT 0
+#define __ATOMIC_LOCK_INIT  0
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { 0, 0 }
+#define __ATOMIC_INITIALIZER { 0, 0 }
Index: linuxthreads/sysdeps/pthread/bits/libc-lock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h,v
retrieving revision 1.17
diff -u -p -r1.17 libc-lock.h
--- libc-lock.h	2001/01/28 16:39:07	1.17
+++ libc-lock.h	2001/04/28 00:19:40
@@ -55,12 +55,12 @@ typedef pthread_key_t __libc_key_t;
    initialized locks must be set to one due to the lack of normal
    atomic operations.) */
 
-#if __LT_SPINLOCK_INIT == 0
+#ifdef __LOCK_INITIALISER_NOT_ZERO
 #  define __libc_lock_define_initialized(CLASS,NAME) \
-  CLASS __libc_lock_t NAME;
+  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
 #else
 #  define __libc_lock_define_initialized(CLASS,NAME) \
-  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+  CLASS __libc_lock_t NAME;
 #endif
 
 #define __libc_rwlock_define_initialized(CLASS,NAME) \
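
The test is inverted so that the common all-zero case emits no
initializer at all and statically-defined locks land in .bss; only
ports that define __LOCK_INITIALISER_NOT_ZERO (hppa, below) pay for a
copy in initialized data.  The two expansions, sketched:

/* Ordinary port (macro not defined):
     __libc_lock_define_initialized (static, foo_lock)
  => static __libc_lock_t foo_lock;                              .bss

   hppa (macro defined in its bits/initspin.h):
  => static __libc_lock_t foo_lock = PTHREAD_MUTEX_INITIALIZER;  .data */
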
Index: linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h,v
retrieving revision 1.9
diff -u -p -r1.9 pthreadtypes.h
--- pthreadtypes.h	2001/01/27 06:26:13	1.9
+++ pthreadtypes.h	2001/04/28 00:19:40
@@ -22,11 +22,13 @@
 #define __need_schedparam
 #include <bits/sched.h>
 
+typedef int __atomic_lock_t;
+
 /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
 struct _pthread_fastlock
 {
   long int __status;   /* "Free" or "taken" or head of waiting list */
-  int __spinlock;      /* Used by compare_and_swap emulation. Also,
+  __atomic_lock_t __spinlock;  /* Used by compare_and_swap emulation. Also,
 			  adaptive SMP lock stores spin count here. */
 };
 
Index: linuxthreads/sysdeps/sh/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/sh/pt-machine.h,v
retrieving revision 1.4
diff -u -p -r1.4 pt-machine.h
--- pt-machine.h	2001/04/11 02:57:01	1.4
+++ pt-machine.h	2001/04/28 00:19:40
@@ -24,8 +24,8 @@
 #endif
 
 /* Spinlock implementation; required.  */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
 {
   int ret;
 
@@ -39,6 +39,7 @@ testandset (int *spinlock)
   return (ret == 0);
 }
 
+#define lock_held(p) (*(p))
 
 /* Get some notion of the current stack.  Need not be exactly the top
    of the stack, just something somewhere in the current frame.  */
Index: linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:11	1.2
+++ initspin.h	2001/04/28 00:19:40
@@ -19,9 +19,12 @@
 
 /* Initial value of a spinlock.  PA-RISC only implements atomic load
    and clear so this must be non-zero. */
-#define __LT_SPINLOCK_INIT 1
+#define __ATOMIC_LOCK_INIT  ((__atomic_lock_t) { 1 })
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { { 1 }, 0 }
+#define __ATOMIC_INITIALIZER { 0, { 1 } }
+
+/* Tell the generic code it can't put locks in the bss section.  */
+
+#define __LOCK_INITIALISER_NOT_ZERO
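
Note the member order in the two initializers: __ATOMIC_INITIALIZER
matches struct pthread_atomic (count first, lock second), while
__LOCK_INITIALIZER puts the lock word first.  That only adds up if the
hppa bits/pthreadtypes.h (the unversioned file flagged with `?' at the
top) also reorders _pthread_fastlock, presumably something like:

struct _pthread_fastlock
{
  __atomic_lock_t __spinlock;	/* { 1 } == free on PA-RISC */
  long int __status;		/* "free" or "taken" or head of waiting list */
};
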
Index: sunrpc/Makefile
===================================================================
RCS file: /cvs/glibc/libc/sunrpc/Makefile,v
retrieving revision 1.67
diff -u -p -r1.67 Makefile
--- Makefile	2001/03/20 18:31:37	1.67
+++ Makefile	2001/04/28 00:19:47
@@ -126,8 +126,7 @@ $(objpfx)rpcgen: $(addprefix $(objpfx),$
 	$(+link)
 
 # Tell rpcgen where to find the C preprocessor.
-rpcgen-cmd = $(built-program-cmd) -Y `$(CC) -print-file-name=cpp | \
-				      sed 's|/cpp$$||'`
+rpcgen-cmd = $(built-program-cmd) -Y /usr/bin
 
 # Install the rpc data base file.
 $(inst_sysconfdir)/rpc: etc.rpc $(+force)
Index: sysdeps/hppa/dl-fptr.c
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/hppa/dl-fptr.c,v
retrieving revision 1.1
diff -u -p -r1.1 dl-fptr.c
--- dl-fptr.c	2000/10/15 03:18:05	1.1
+++ dl-fptr.c	2001/04/28 00:19:47
@@ -29,8 +29,7 @@
 #ifdef _LIBC_REENTRANT
 # include <pt-machine.h>
 
-/* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
-static int __hppa_fptr_lock = 1;
+static __atomic_lock_t __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
 /* Because ld.so is now versioned, these functions can be in their own
@@ -66,7 +65,7 @@ __hppa_make_fptr (const struct link_map 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone. We don't need a lock during bootstrap. */
   if (mem == NULL)
-    while (testandset (&__hppa_fptr_lock));
+    while (try_lock(&__hppa_fptr_lock));
 #endif
 
   /* Search the sorted linked list for an existing entry for this
@@ -126,9 +125,8 @@ __hppa_make_fptr (const struct link_map 
 
 found:
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.  Again, remember, zero means the lock is taken!  */
   if (mem == NULL)
-    __hppa_fptr_lock = 1;
+    __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
   /* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
@@ -147,7 +145,7 @@ _dl_unmap (struct link_map *map)
 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone.  */
-  while (testandset (&__hppa_fptr_lock));
+  while (try_lock(&__hppa_fptr_lock));
 #endif
 
   /* Search the sorted linked list for the first entry for this object.  */
@@ -180,8 +178,7 @@ _dl_unmap (struct link_map *map)
     }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock. */
-  __hppa_fptr_lock = 1;
+  __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 }
 
@@ -193,7 +190,7 @@ _dl_lookup_address (const void *address)
 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone.  */
-  while (testandset (&__hppa_fptr_lock));
+  while (try_lock(&__hppa_fptr_lock));
 #endif
 
   for (f = __fptr_root; f != NULL; f = f->next)
@@ -204,8 +201,7 @@ _dl_lookup_address (const void *address)
       }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.   */
-  __hppa_fptr_lock = 1;
+  __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
   return addr;
Index: sysdeps/ia64/dl-fptr.c
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/ia64/dl-fptr.c,v
retrieving revision 1.2
diff -u -p -r1.2 dl-fptr.c
--- dl-fptr.c	2000/09/26 19:02:08	1.2
+++ dl-fptr.c	2001/04/28 00:19:47
@@ -28,7 +28,7 @@
 #ifdef _LIBC_REENTRANT
 # include <pt-machine.h>
 
-static int __ia64_fptr_lock = 0;
+static __atomic_lock_t __ia64_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
 /* Because ld.so is now versioned, these functions can be in their own
@@ -67,7 +67,7 @@ __ia64_make_fptr (const struct link_map 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone. We don't need a lock during bootstrap. */
   if (mem == NULL)
-    while (testandset (&__ia64_fptr_lock));
+    while (try_lock(&__ia64_fptr_lock));
 #endif
 
   /* Search the sorted linked list for an existing entry for this
@@ -127,9 +127,8 @@ __ia64_make_fptr (const struct link_map 
 
 found:
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.  */
   if (mem == NULL)
-    __ia64_fptr_lock = 0;
+    __ia64_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
   return (Elf64_Addr) f;
@@ -147,7 +146,7 @@ _dl_unmap (struct link_map *map)
 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone.  */
-  while (testandset (&__ia64_fptr_lock));
+  while (try_lock(&__ia64_fptr_lock));
 #endif
 
   /* Search the sorted linked list for the first entry for this object.  */
@@ -180,8 +179,7 @@ _dl_unmap (struct link_map *map)
     }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.   */
-  __ia64_fptr_lock = 0;
+  __ia64_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 }
 
@@ -193,7 +191,7 @@ _dl_lookup_address (const void *address)
 
 #ifdef _LIBC_REENTRANT
   /* Make sure we are alone.  */
-  while (testandset (&__ia64_fptr_lock));
+  while (try_lock(&__ia64_fptr_lock));
 #endif
 
   for (f = __fptr_root; f != NULL; f = f->next)
@@ -204,8 +202,7 @@ _dl_lookup_address (const void *address)
       }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.   */
-  __ia64_fptr_lock = 0;
+  __ia64_fptr_lock = __ATOMIC_LOCK_INIT;
 #endif
 
   return addr;
