? linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h
? sysdeps/hppa/stackinfo.h
? sysdeps/unix/sysv/linux/hppa/getdents64.c
? sysdeps/unix/sysv/linux/hppa/mmap64.c
? sysdeps/unix/sysv/linux/hppa/bits/resource.h
Index: linuxthreads/attr.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/attr.c,v
retrieving revision 1.22
diff -u -p -r1.22 attr.c
--- attr.c	2001/03/27 03:19:51	1.22
+++ attr.c	2001/04/25 06:51:16
@@ -298,7 +298,7 @@ int pthread_getattr_np (pthread_t thread
 #ifndef _STACK_GROWS_UP
   attr->__stackaddr = (char *)(descr + 1);
 #else
-# error __stackaddr not handled
+  attr->__stackaddr = (char *)descr;
 #endif
 
   return 0;
Index: linuxthreads/internals.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/internals.h,v
retrieving revision 1.66
diff -u -p -r1.66 internals.h
--- internals.h	2001/04/23 20:03:15	1.66
+++ internals.h	2001/04/25 06:51:17
@@ -27,7 +27,8 @@
 #include <sys/types.h>
 #include <bits/libc-tsd.h> /* for _LIBC_TSD_KEY_N */
 
-extern long int testandset (int *spinlock);
+extern long int testandset (_lt_spinlock_t *spinlock);
+extern long int test (_lt_spinlock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include "pt-machine.h"
@@ -106,7 +107,7 @@ typedef struct _pthread_extricate_struct
 
 struct pthread_atomic {
   long p_count;
-  int p_spinlock;
+  _lt_spinlock_t p_spinlock;
 };
 
 /* Context info for read write locks. The pthread_rwlock_info structure
@@ -384,7 +385,11 @@ static inline pthread_descr thread_self 
   else if (__pthread_nonstandard_stacks)
     return __pthread_find_self();
   else
+#ifdef _STACK_GROWS_DOWN
     return (pthread_descr)(((unsigned long)sp | (STACK_SIZE-1))+1) - 1;
+#else
+    return (pthread_descr)((unsigned long)sp &~ (STACK_SIZE-1));
+#endif
 #endif
 }
 
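A side note on the thread_self() change above (editorial sketch, not part of
the patch): every linuxthreads stack occupies a STACK_SIZE-aligned region,
with the descriptor at the top of the region when stacks grow down and at the
bottom when they grow up.  The struct and the constant below are stand-ins
for the real ones:

#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE 0x40000UL        /* stand-in; must be a power of two */
struct descr { long filler[32]; };  /* stand-in for the thread descriptor */

/* Stacks grow down: round sp up to the end of the region, then step
   back over the one descriptor sitting at the top.  */
static struct descr *self_down (uintptr_t sp)
{
  return (struct descr *) ((sp | (STACK_SIZE - 1)) + 1) - 1;
}

/* Stacks grow up (hppa): the descriptor is at the very bottom, so
   masking sp down to the region start finds it directly.  */
static struct descr *self_up (uintptr_t sp)
{
  return (struct descr *) (sp & ~(STACK_SIZE - 1));
}

int main (void)
{
  uintptr_t sp = 0x40012345;        /* any address inside one region */
  printf ("down: %p  up: %p\n", (void *) self_down (sp), (void *) self_up (sp));
  return 0;
}
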
Index: linuxthreads/manager.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/manager.c,v
retrieving revision 1.69
diff -u -p -r1.69 manager.c
--- manager.c	2001/04/23 18:50:30	1.69
+++ manager.c	2001/04/25 06:51:17
@@ -401,7 +401,39 @@ static int pthread_allocate_stack(const 
 
       guardaddr = new_thread_bottom + stacksize/2;
       /* We leave the guard area in the middle unmapped.	*/
-#else  /* !NEED_SEPARATE_REGISTER_STACK */
+#elif defined (_STACK_GROWS_UP)
+
+      /* The thread descriptor goes at the bottom of this area, and
+	 the stack starts directly above it.  We leave the last page of
+	 the region unmapped, to prevent a rogue thread from overwriting
+	 another thread's stack data.  */
+      if (attr != NULL)
+	{
+	  guardsize = page_roundup (attr->__guardsize, granularity);
+	  stacksize = STACK_SIZE - guardsize;
+	  stacksize = MIN (stacksize,
+			   page_roundup (attr->__stacksize, granularity));
+	}
+      else
+	{
+	  guardsize = granularity;
+	  stacksize = STACK_SIZE - granularity;
+	}
+
+      new_thread = (pthread_descr)((unsigned long)default_new_thread &~ (STACK_SIZE - 1));
+      map_addr = mmap(new_thread, stacksize + guardsize,
+		      PROT_READ | PROT_WRITE | PROT_EXEC,
+		      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      if (map_addr == MAP_FAILED)
+	{
+	  return -1;
+	}
+
+      new_thread_bottom = map_addr + sizeof(*new_thread);
+      guardaddr = map_addr + stacksize;
+      guardsize = granularity;
+
+#else  /* !NEED_SEPARATE_REGISTER_STACK && !_STACK_GROWS_UP */
 # if FLOATING_STACKS
       if (attr != NULL)
 	{
@@ -591,6 +625,10 @@ static int pthread_handle_create(pthread
 			 (char *)new_thread - new_thread_bottom,
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 			 __pthread_sig_cancel, new_thread);
+#elif defined(_STACK_GROWS_UP)
+	  pid = __clone(pthread_start_thread_event, (void **) new_thread_bottom,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+			__pthread_sig_cancel, new_thread);
 #else
 	  pid = __clone(pthread_start_thread_event, (void **) new_thread,
 			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -627,6 +664,10 @@ static int pthread_handle_create(pthread
                      (char *)new_thread - new_thread_bottom,
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 		     __pthread_sig_cancel, new_thread);
+#elif defined(_STACK_GROWS_UP)
+      pid = __clone(pthread_start_thread, (void **) new_thread_bottom,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+		    __pthread_sig_cancel, new_thread);
 #else
       pid = __clone(pthread_start_thread, (void **) new_thread,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
@@ -643,6 +684,9 @@ static int pthread_handle_create(pthread
 			    - new_thread_bottom);
 	munmap((caddr_t)new_thread_bottom,
 	       2 * stacksize + new_thread->p_guardsize);
+#elif defined(_STACK_GROWS_UP)
+	size_t stacksize = guardaddr - (char *)(new_thread);
+	munmap(new_thread, stacksize);
 #else
 	size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
 	munmap(new_thread_bottom - guardsize, guardsize + stacksize);
@@ -708,6 +752,10 @@ static void pthread_free(pthread_descr t
       size_t guardsize = th->p_guardsize;
       /* Free the stack and thread descriptor area */
       char *guardaddr = th->p_guardaddr;
+#ifdef _STACK_GROWS_UP
+      size_t stacksize = guardaddr - (char *)th;
+      guardaddr = (char *)th;
+#else
       /* Guardaddr is always set, even if guardsize is 0.  This allows
 	 us to compute everything else.  */
       size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
@@ -715,6 +763,7 @@ static void pthread_free(pthread_descr t
       /* Take account of the register stack, which is below guardaddr.  */
       guardaddr -= stacksize;
       stacksize *= 2;
+#endif
 #endif
       /* Unmap the stack.  */
       munmap(guardaddr, stacksize + guardsize);
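
For reference, the layout that the new _STACK_GROWS_UP branch of
pthread_allocate_stack() produces can be checked with a few lines of
arithmetic (editorial sketch; the sizes are stand-ins, not the values
the real code computes):

#include <assert.h>
#include <stdint.h>

int main (void)
{
  const uintptr_t STACK_SIZE = 0x40000, granularity = 0x1000;
  const uintptr_t descr_size = 0x400;             /* sizeof(*new_thread), say */
  uintptr_t map_addr = 0x40000000;                /* pretend mmap returned this */
  uintptr_t stacksize = STACK_SIZE - granularity; /* the no-attribute case */

  uintptr_t new_thread        = map_addr;               /* descriptor at the bottom */
  uintptr_t new_thread_bottom = map_addr + descr_size;  /* stack begins above it */
  uintptr_t guardaddr         = map_addr + stacksize;   /* guard page on top */

  /* The stack grows from new_thread_bottom upward toward guardaddr,
     and descriptor + stack + guard exactly fill the aligned region.  */
  assert (new_thread < new_thread_bottom && new_thread_bottom < guardaddr);
  assert (guardaddr + granularity == map_addr + STACK_SIZE);
  return 0;
}
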
Index: linuxthreads/pt-machine.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/pt-machine.c,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.c
--- pt-machine.c	2000/12/27 17:15:20	1.2
+++ pt-machine.c	2001/04/25 06:51:17
@@ -19,7 +19,9 @@
 
 #define PT_EI
 
-extern long int testandset (int *spinlock);
+#include <pthread.h>
+
+extern long int testandset (_lt_spinlock_t *spinlock);
 extern int __compare_and_swap (long int *p, long int oldval, long int newval);
 
 #include <pt-machine.h>
Index: linuxthreads/pthread.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/pthread.c,v
retrieving revision 1.79
diff -u -p -r1.79 pthread.c
--- pthread.c	2001/04/23 18:50:30	1.79
+++ pthread.c	2001/04/25 06:51:17
@@ -412,11 +412,17 @@ static void pthread_initialize(void)
   /* Test if compare-and-swap is available */
   __pthread_has_cas = compare_and_swap_is_available();
 #endif
+#ifdef _STACK_GROWS_UP
+  /* The initial thread already has all the stack it needs */
+  __pthread_initial_thread_bos = (char *)
+    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
+#else
   /* For the initial stack, reserve at least STACK_SIZE bytes of stack
      below the current stack address, and align that on a
      STACK_SIZE boundary. */
   __pthread_initial_thread_bos =
     (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
+#endif
   /* Update the descriptor for the initial thread. */
   __pthread_initial_thread.p_pid = __getpid();
   /* Likewise for the resolver state _res.  */
@@ -544,6 +550,11 @@ int __pthread_initialize_manager(void)
 			 THREAD_MANAGER_STACK_SIZE,
 			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 			 (void *)(long)manager_pipe[0]);
+#elif defined(_STACK_GROWS_UP)
+	  pid = __clone(__pthread_manager_event,
+			(void **) __pthread_manager_thread_bos,
+			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+			(void *)(long)manager_pipe[0]);
 #else
 	  pid = __clone(__pthread_manager_event,
 			(void **) __pthread_manager_thread_tos,
@@ -580,6 +591,10 @@ int __pthread_initialize_manager(void)
 		     THREAD_MANAGER_STACK_SIZE,
 		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
 		     (void *)(long)manager_pipe[0]);
+#elif defined(_STACK_GROWS_UP)
+      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
+		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
+		    (void *)(long)manager_pipe[0]);
 #else
       pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
 		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
Index: linuxthreads/rwlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/rwlock.c,v
retrieving revision 1.18
diff -u -p -r1.18 rwlock.c
--- rwlock.c	2000/12/27 17:16:24	1.18
+++ rwlock.c	2001/04/25 06:51:17
@@ -36,9 +36,9 @@ static int rwlock_rd_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_read_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
@@ -48,9 +48,9 @@ static int rwlock_wr_extricate_func(void
   pthread_rwlock_t *rwlock = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &rwlock->__rw_lock, NULL);
+  __pthread_lock(&rwlock->__rw_lock, NULL);
   did_remove = remove_from_queue(&rwlock->__rw_write_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &rwlock->__rw_lock);
+  __pthread_unlock(&rwlock->__rw_lock);
 
   return did_remove;
 }
Index: linuxthreads/semaphore.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.c,v
retrieving revision 1.16
diff -u -p -r1.16 semaphore.c
--- semaphore.c	2001/04/10 21:12:00	1.16
+++ semaphore.c	2001/04/25 06:51:17
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshar
     errno = ENOSYS;
     return -1;
   }
-  __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_init_lock(&sem->__sem_lock);
   sem->__sem_value = value;
   sem->__sem_waiting = NULL;
   return 0;
@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *
   sem_t *sem = obj;
   int did_remove = 0;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   did_remove = remove_from_queue(&sem->__sem_waiting, th);
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   return did_remove;
 }
@@ -66,10 +66,10 @@ int __new_sem_wait(sem_t * sem)
   extr.pu_object = sem;
   extr.pu_extricate_func = new_sem_extricate_func;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     sem->__sem_value--;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
   /* Register extrication interface */
@@ -81,7 +81,7 @@ int __new_sem_wait(sem_t * sem)
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -121,7 +121,7 @@ int __new_sem_trywait(sem_t * sem)
 {
   int retval;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+  __pthread_lock(&sem->__sem_lock, NULL);
   if (sem->__sem_value == 0) {
     errno = EAGAIN;
     retval = -1;
@@ -129,7 +129,7 @@ int __new_sem_trywait(sem_t * sem)
     sem->__sem_value--;
     retval = 0;
   }
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
   return retval;
 }
 
@@ -140,19 +140,19 @@ int __new_sem_post(sem_t * sem)
   struct pthread_request request;
 
   if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-    __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+    __pthread_lock(&sem->__sem_lock, self);
     if (sem->__sem_waiting == NULL) {
       if (sem->__sem_value >= SEM_VALUE_MAX) {
         /* Overflow */
         errno = ERANGE;
-        __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+        __pthread_unlock(&sem->__sem_lock);
         return -1;
       }
       sem->__sem_value++;
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
     } else {
       th = dequeue(&sem->__sem_waiting);
-      __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+      __pthread_unlock(&sem->__sem_lock);
       th->p_sem_avail = 1;
       WRITE_MEMORY_BARRIER();
       restart(th);
@@ -214,17 +214,17 @@ int sem_timedwait(sem_t *sem, const stru
   int already_canceled = 0;
   int spurious_wakeup_count;
 
-  __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+  __pthread_lock(&sem->__sem_lock, self);
   if (sem->__sem_value > 0) {
     --sem->__sem_value;
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return 0;
   }
 
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
     /* The standard requires that if the function would block and the
        time value is illegal, the function returns with an error.  */
-    __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+    __pthread_unlock(&sem->__sem_lock);
     return EINVAL;
   }
 
@@ -241,7 +241,7 @@ int sem_timedwait(sem_t *sem, const stru
     enqueue(&sem->__sem_waiting, self);
   else
     already_canceled = 1;
-  __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+  __pthread_unlock(&sem->__sem_lock);
 
   if (already_canceled) {
     __pthread_set_own_extricate_if(self, 0);
@@ -257,9 +257,9 @@ int sem_timedwait(sem_t *sem, const stru
 	/* __pthread_lock will queue back any spurious restarts that
 	   may happen to it. */
 
-	__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+	__pthread_lock(&sem->__sem_lock, self);
 	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-	__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+	__pthread_unlock(&sem->__sem_lock);
 
 	if (was_on_queue) {
 	  __pthread_set_own_extricate_if(self, 0);
Index: linuxthreads/semaphore.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/semaphore.h,v
retrieving revision 1.10
diff -u -p -r1.10 semaphore.h
--- semaphore.h	2001/01/27 05:39:52	1.10
+++ semaphore.h	2001/04/25 06:51:17
@@ -31,11 +31,7 @@ typedef struct _pthread_descr_struct *_p
 /* System specific semaphore definition.  */
 typedef struct
 {
-  struct
-  {
-    long int __status;
-    int __spinlock;
-  } __sem_lock;
+  struct _pthread_fastlock __sem_lock;
   int __sem_value;
   _pthread_descr __sem_waiting;
 } sem_t;
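
The cast removals in rwlock.c and semaphore.c above all follow from this
change: __sem_lock now really is a struct _pthread_fastlock rather than an
anonymous struct that merely duplicated its layout.  A compile-time sketch
of the property the old casts silently depended on (stand-in types, not the
real declarations):

#include <assert.h>
#include <stddef.h>

struct fastlock     { long int __status; int __spinlock; }; /* old generic layout */
struct old_sem_lock { long int __status; int __spinlock; }; /* the anonymous copy */

static_assert (sizeof (struct old_sem_lock) == sizeof (struct fastlock),
	       "the old casts relied on identical size");
static_assert (offsetof (struct old_sem_lock, __spinlock)
	       == offsetof (struct fastlock, __spinlock),
	       "the old casts relied on identical member offsets");
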
Index: linuxthreads/spinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.c,v
retrieving revision 1.32
diff -u -p -r1.32 spinlock.c
--- spinlock.c	2001/02/19 18:47:27	1.32
+++ spinlock.c	2001/04/25 06:51:17
@@ -25,7 +25,7 @@
 #include "restart.h"
 
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static void __pthread_acquire(int * spinlock);
+static void __pthread_acquire(_lt_spinlock_t * spinlock);
 #endif
 
 
@@ -239,12 +239,12 @@ again:
 struct wait_node {
   struct wait_node *next;	/* Next node in null terminated linked list */
   pthread_descr thr;		/* The thread waiting with this node */
-  int abandoned;		/* Atomic flag */
+  _lt_spinlock_t abandoned;		/* Atomic flag */
 };
 
 static long wait_node_free_list;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
-static int wait_node_free_list_spinlock;
+static _lt_spinlock_t wait_node_free_list_spinlock;
 #endif
 
 /* Allocate a new node from the head of the free list using an atomic
@@ -272,7 +272,7 @@ static struct wait_node *wait_node_alloc
       wait_node_free_list = (long) new_node->next;
     }
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    wait_node_free_list_spinlock = __LT_SPINLOCK_INIT;
 
     if (new_node == 0)
       return malloc(sizeof *wait_node_alloc());
@@ -314,7 +314,7 @@ static void wait_node_free(struct wait_n
     wn->next = (struct wait_node *) wait_node_free_list;
     wait_node_free_list = (long) wn;
     WRITE_MEMORY_BARRIER();
-    wait_node_free_list_spinlock = 0;
+    wait_node_free_list_spinlock = __LT_SPINLOCK_INIT;
     return;
   }
 #endif
@@ -387,7 +387,7 @@ void __pthread_alt_lock(struct _pthread_
       if (self == NULL)
 	self = thread_self();
 
-      wait_node.abandoned = 0;
+      wait_node.abandoned = __LT_SPINLOCK_INIT;
       wait_node.next = (struct wait_node *) lock->__status;
       wait_node.thr = self;
       lock->__status = (long) &wait_node;
@@ -414,7 +414,7 @@ void __pthread_alt_lock(struct _pthread_
       wait_node.thr = self;
       newstatus = (long) &wait_node;
     }
-    wait_node.abandoned = 0;
+    wait_node.abandoned = __LT_SPINLOCK_INIT;
     wait_node.next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -461,7 +461,7 @@ int __pthread_alt_timedlock(struct _pthr
       if (self == NULL)
 	self = thread_self();
 
-      p_wait_node->abandoned = 0;
+      p_wait_node->abandoned = __LT_SPINLOCK_INIT;
       p_wait_node->next = (struct wait_node *) lock->__status;
       p_wait_node->thr = self;
       lock->__status = (long) p_wait_node;
@@ -485,7 +485,7 @@ int __pthread_alt_timedlock(struct _pthr
       p_wait_node->thr = self;
       newstatus = (long) p_wait_node;
     }
-    p_wait_node->abandoned = 0;
+    p_wait_node->abandoned = __LT_SPINLOCK_INIT;
     p_wait_node->next = (struct wait_node *) oldstatus;
     /* Make sure the store in wait_node.next completes before performing
        the compare-and-swap */
@@ -579,7 +579,7 @@ void __pthread_alt_unlock(struct _pthrea
     while (p_node != (struct wait_node *) 1) {
       int prio;
 
-      if (p_node->abandoned) {
+      if (test(&p_node->abandoned)) {
 	/* Remove abandoned node. */
 #if defined TEST_FOR_COMPARE_AND_SWAP
 	if (!__pthread_has_cas)
@@ -665,7 +665,7 @@ int __pthread_has_cas = 0;
 #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
 
 int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                               int * spinlock)
+                               _lt_spinlock_t * spinlock)
 {
   int res;
   if (testandset(spinlock)) __pthread_acquire(spinlock);
@@ -676,7 +676,7 @@ int __pthread_compare_and_swap(long * pt
   }
   /* Prevent reordering of store to *ptr above and store to *spinlock below */
   WRITE_MEMORY_BARRIER();
-  *spinlock = 0;
+  *spinlock = __LT_SPINLOCK_INIT;
   return res;
 }
 
@@ -701,7 +701,7 @@ int __pthread_compare_and_swap(long * pt
    - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
      sched_yield(), then sleeping again if needed. */
 
-static void __pthread_acquire(int * spinlock)
+static void __pthread_acquire(_lt_spinlock_t * spinlock)
 {
   int cnt = 0;
   struct timespec tm;
Index: linuxthreads/spinlock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/spinlock.h,v
retrieving revision 1.22
diff -u -p -r1.22 spinlock.h
--- spinlock.h	2001/01/28 08:52:05	1.22
+++ spinlock.h	2001/04/25 06:51:17
@@ -37,10 +37,10 @@
 
 extern int __pthread_has_cas;
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      _lt_spinlock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   if (__builtin_expect (__pthread_has_cas, 1))
     return __compare_and_swap(ptr, oldval, newval);
@@ -58,7 +58,7 @@ static inline int compare_and_swap(long 
 
 static inline int
 compare_and_swap_with_release_semantics (long * ptr, long oldval,
-					 long newval, int * spinlock)
+					 long newval, _lt_spinlock_t * spinlock)
 {
   return __compare_and_swap_with_release_semantics (ptr, oldval,
 						    newval);
@@ -67,7 +67,7 @@ compare_and_swap_with_release_semantics 
 #endif
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   return __compare_and_swap(ptr, oldval, newval);
 }
@@ -75,10 +75,10 @@ static inline int compare_and_swap(long 
 #else
 
 extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
-                                      int * spinlock);
+                                      _lt_spinlock_t * spinlock);
 
 static inline int compare_and_swap(long * ptr, long oldval, long newval,
-                                   int * spinlock)
+                                   _lt_spinlock_t * spinlock)
 {
   return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
 }
Index: linuxthreads/sysdeps/hppa/pspinlock.c
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pspinlock.c,v
retrieving revision 1.2
diff -u -p -r1.2 pspinlock.c
--- pspinlock.c	2000/12/27 17:17:17	1.2
+++ pspinlock.c	2001/04/25 06:51:17
@@ -21,18 +21,20 @@
 #include <pthread.h>
 #include "internals.h"
 
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+#define __ldcw(a) ({ \
+	unsigned int __ret; \
+	__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a) : "memory"); \
+	__ret; \
+})
+
 int
 __pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  do
-    asm volatile ("ldcw %1,%0"
-		  : "=r" (val), "=m" (*lock)
-		  : "m" (*lock));
-  while (!val);
+	while (__ldcw (lock) == 0)
+		while (*lock == 0) ;
 
-  return 0;
+	return 0;
 }
 weak_alias (__pthread_spin_lock, pthread_spin_lock)
 
@@ -40,11 +42,7 @@ weak_alias (__pthread_spin_lock, pthread
 int
 __pthread_spin_trylock (pthread_spinlock_t *lock)
 {
-  unsigned int val;
-
-  asm volatile ("ldcw %1,%0"
-		: "=r" (val), "=m" (*lock)
-		: "m" (*lock));
+  unsigned int val = __ldcw (lock);
 
   return val ? 0 : EBUSY;
 }
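
The nested loop in __pthread_spin_lock() above is the classic
test-and-test-and-set idiom: only retry the expensive atomic operation
(ldcw) once a plain load has seen the lock become free, so a contended spin
does not generate constant coherency traffic.  A generic C11 rendering of
the same shape (sketch only; real hppa code must use ldcw, and note the
inverted convention of nonzero meaning free):

#include <stdatomic.h>

static void
ttas_lock (atomic_int *lock)               /* nonzero = free, 0 = held */
{
  while (atomic_exchange (lock, 0) == 0)   /* "ldcw": fetch and clear */
    while (atomic_load (lock) == 0)        /* spin on cheap plain loads */
      ;
}

static void
ttas_unlock (atomic_int *lock)
{
  atomic_store (lock, 1);                  /* restore the "free" value */
}
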
Index: linuxthreads/sysdeps/hppa/pt-machine.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/hppa/pt-machine.h,v
retrieving revision 1.2
diff -u -p -r1.2 pt-machine.h
--- pt-machine.h	2000/12/18 05:55:14	1.2
+++ pt-machine.h	2001/04/25 06:51:17
@@ -19,6 +19,7 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */
 
+#include <sys/types.h>
 #include <bits/initspin.h>
 
 #ifndef PT_EI
@@ -35,11 +36,9 @@ register char * stack_pointer __asm__ ("
    load and clear, so hppa spinlocks must use zero to signify that
    someone is holding the lock.  */
 
-#define xstr(s) str(s)
-#define str(s) #s
 /* Spinlock implementation; required.  */
 PT_EI long int
-testandset (int *spinlock)
+testandset (_lt_spinlock_t *spinlock)
 {
   int ret;
 
@@ -50,5 +49,9 @@ testandset (int *spinlock)
 
   return ret == 0;
 }
-#undef str
-#undef xstr
+
+PT_EI long int
+test (_lt_spinlock_t *spinlock)
+{
+  return spinlock->lock == 0;
+}
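
To spell out the convention the new test() function encodes (editorial
sketch): ldcw fetches the old word and zeroes it in one atomic step, so a
word of 0 means some thread has already "set" the flag, and the free state
is nonzero.  A non-atomic C model of the pair (stand-in type; the real
testandset() above is the only place this happens atomically):

typedef struct { int lock; } spin_model;    /* stand-in for _lt_spinlock_t */

static long model_testandset (spin_model *s)
{
  int old = s->lock;    /* ldcw returns the old word...              */
  s->lock = 0;          /* ...and clears it, atomically on real hppa */
  return old == 0;      /* nonzero result: it was already set        */
}

static long model_test (spin_model *s)
{
  return s->lock == 0;  /* set/held is encoded as zero, as in test() */
}
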
Index: linuxthreads/sysdeps/pthread/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:31	1.2
+++ initspin.h	2001/04/25 06:51:17
@@ -20,9 +20,8 @@
 /* Initial value of a spinlock.  Most platforms should use zero,
    unless they only implement a "test and clear" operation instead of
    the usual "test and set". */
-#define __LT_SPINLOCK_INIT 0
+#define __LT_SPINLOCK_INIT  ((_lt_spinlock_t) { 0 })
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { 0, { 0 } }
+#define __ATOMIC_INITIALIZER { 0, { 0 } }
Index: linuxthreads/sysdeps/pthread/bits/libc-lock.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h,v
retrieving revision 1.17
diff -u -p -r1.17 libc-lock.h
--- libc-lock.h	2001/01/28 16:39:07	1.17
+++ libc-lock.h	2001/04/25 06:51:17
@@ -55,7 +55,7 @@ typedef pthread_key_t __libc_key_t;
    initialized locks must be set to one due to the lack of normal
    atomic operations.) */
 
-#if __LT_SPINLOCK_INIT == 0
+#if 0 /* Was __LT_SPINLOCK_INIT == 0; cpp cannot evaluate the new aggregate form.  */
 #  define __libc_lock_define_initialized(CLASS,NAME) \
   CLASS __libc_lock_t NAME;
 #else
Index: linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h,v
retrieving revision 1.9
diff -u -p -r1.9 pthreadtypes.h
--- pthreadtypes.h	2001/01/27 06:26:13	1.9
+++ pthreadtypes.h	2001/04/25 06:51:17
@@ -22,11 +22,15 @@
 #define __need_schedparam
 #include <bits/sched.h>
 
+typedef struct {
+  int lock;
+} _lt_spinlock_t;
+
 /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
 struct _pthread_fastlock
 {
   long int __status;   /* "Free" or "taken" or head of waiting list */
-  int __spinlock;      /* Used by compare_and_swap emulation. Also,
+  _lt_spinlock_t __spinlock;  /* Used by compare_and_swap emulation. Also,
 			  adaptive SMP lock stores spin count here. */
 };
 
Index: linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h
===================================================================
RCS file: /cvs/glibc/libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h,v
retrieving revision 1.2
diff -u -p -r1.2 initspin.h
--- initspin.h	2001/01/28 08:47:11	1.2
+++ initspin.h	2001/04/25 06:51:17
@@ -19,9 +19,8 @@
 
 /* Initial value of a spinlock.  PA-RISC only implements atomic load
    and clear so this must be non-zero. */
-#define __LT_SPINLOCK_INIT 1
+#define __LT_SPINLOCK_INIT  ((_lt_spinlock_t) { 1 })
 
 /* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { { 1 }, 0 }
+#define __ATOMIC_INITIALIZER { 0, { 1 } }
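
Note the member order here: { { 1 }, 0 } puts the spinlock word first, the
reverse of the generic struct _pthread_fastlock earlier in this patch.  That
only adds up if the new hppa bits/pthreadtypes.h (listed as an uncommitted
`?' file at the top of this diff, and not included in it) swaps the members.
A hypothetical reconstruction, inferred from the initializer rather than
taken from the actual file:

typedef struct { int lock; } _lt_spinlock_t;  /* as in the generic header */

struct _pthread_fastlock                      /* hypothetical hppa order */
{
  _lt_spinlock_t __spinlock;   /* ldcw word: 1 = free, 0 = held */
  long int __status;           /* "free" or "taken" or head of waiting list */
};
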
Index: sunrpc/Makefile
===================================================================
RCS file: /cvs/glibc/libc/sunrpc/Makefile,v
retrieving revision 1.67
diff -u -p -r1.67 Makefile
--- Makefile	2001/03/20 18:31:37	1.67
+++ Makefile	2001/04/25 06:51:17
@@ -126,8 +126,7 @@ $(objpfx)rpcgen: $(addprefix $(objpfx),$
 	$(+link)
 
 # Tell rpcgen where to find the C preprocessor.
-rpcgen-cmd = $(built-program-cmd) -Y `$(CC) -print-file-name=cpp | \
-				      sed 's|/cpp$$||'`
+rpcgen-cmd = $(built-program-cmd) -Y /usr/bin
 
 # Install the rpc data base file.
 $(inst_sysconfdir)/rpc: etc.rpc $(+force)
Index: sysdeps/hppa/dl-fptr.c
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/hppa/dl-fptr.c,v
retrieving revision 1.1
diff -u -p -r1.1 dl-fptr.c
--- dl-fptr.c	2000/10/15 03:18:05	1.1
+++ dl-fptr.c	2001/04/25 06:51:18
@@ -29,8 +29,7 @@
 #ifdef _LIBC_REENTRANT
 # include <pt-machine.h>
 
-/* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
-static int __hppa_fptr_lock = 1;
+static _lt_spinlock_t __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
 /* Because ld.so is now versioned, these functions can be in their own
@@ -126,9 +125,9 @@ __hppa_make_fptr (const struct link_map 
 
 found:
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.  Again, remember, zero means the lock is taken!  */
+  /* Release the lock. */
   if (mem == NULL)
-    __hppa_fptr_lock = 1;
+    __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
   /* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
@@ -181,7 +180,7 @@ _dl_unmap (struct link_map *map)
 
 #ifdef _LIBC_REENTRANT
   /* Release the lock. */
-  __hppa_fptr_lock = 1;
+  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 }
 
@@ -204,8 +203,8 @@ _dl_lookup_address (const void *address)
       }
 
 #ifdef _LIBC_REENTRANT
-  /* Release the lock.   */
-  __hppa_fptr_lock = 1;
+  /* Release the lock. */
+  __hppa_fptr_lock = __LT_SPINLOCK_INIT;
 #endif
 
   return addr;
Index: sysdeps/hppa/fpu/fclrexcpt.c
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/hppa/fpu/fclrexcpt.c,v
retrieving revision 1.1
diff -u -p -r1.1 fclrexcpt.c
--- fclrexcpt.c	2000/10/15 03:33:51	1.1
+++ fclrexcpt.c	2001/04/25 06:51:18
@@ -29,7 +29,7 @@ feclearexcept (int excepts)
   __asm__ ("fstd %%fr0,0(%1)" : "=m" (*sw) : "r" (sw));
 
   /* Clear all the relevant bits. */
-  sw[0] &= ~(excepts & FE_ALL_EXCEPT);
+  sw[0] &= ~((excepts & FE_ALL_EXCEPT) << 27);
   __asm__ ("fldd 0(%0),%%fr0" : : "r" (sw));
 
   /* Success.  */
Index: sysdeps/unix/sysv/linux/kernel-features.h
===================================================================
RCS file: /cvs/glibc/libc/sysdeps/unix/sysv/linux/kernel-features.h,v
retrieving revision 1.24
diff -u -p -r1.24 kernel-features.h
--- kernel-features.h	2001/04/19 20:45:18	1.24
+++ kernel-features.h	2001/04/25 06:51:18
@@ -169,3 +169,17 @@
 #if __LINUX_KERNEL_VERSION >= (132096+99) && defined __powerpc__
 # define __ASSUME_STD_AUXV	1
 #endif
+
+/* There are an infinite number of PA-RISC kernel versions numbered
+   2.4.0, but they have not really been released as such, so `upgrade
+   to the latest CVS kernel if you want to compile your own libc' is a
+   reasonable rule.  */
+#ifdef __hppa__
+# define __ASSUME_TRUNCATE64_SYSCALL	1
+# define __ASSUME_MMAP2_SYSCALL		1
+# define __ASSUME_STAT64_SYSCALL	1
+# define __ASSUME_IPC64			1
+# define __ASSUME_ST_INO_64_BIT		1
+# define __ASSUME_FCNTL64		1
+# define __ASSUME_GETDENTS64_SYSCALL	1
+#endif

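For context, each __ASSUME_* macro defined above lets a syscall wrapper
elsewhere in the tree drop its runtime ENOSYS fallback.  The usual shape,
sketched with made-up helper names (hypothetical, not from this diff):

#include <errno.h>

extern long sys_truncate64 (const char *path, long long len); /* raw syscall */
extern int  emu_truncate64 (const char *path, long long len); /* userland fallback */

int
truncate64_wrapper (const char *path, long long length)
{
#ifdef __ASSUME_TRUNCATE64_SYSCALL
  /* The kernel is guaranteed new enough (as asserted for hppa above),
     so call the syscall unconditionally and compile no fallback.  */
  return sys_truncate64 (path, length);
#else
  long r = sys_truncate64 (path, length);
  if (r == -1 && errno == ENOSYS)
    return emu_truncate64 (path, length);   /* old kernel: emulate */
  return r;
#endif
}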