This patch adds support for parisc and applies on top of:

    http://redhat.com/~mingo/spinlock-patches/consolidate-spinlocks.patch

Posted on:
> Date: Fri, 3 Jun 2005 17:40:29 +0200
> From: Ingo Molnar
> Subject: [patch] spinlock consolidation, v2

Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
Builds 32-bit SMP kernel (not booted or tested). Do not try to build
non-SMP kernels. That should be trivial to fix up later if necessary.

I converted bit ops, tlb, and IPI locks to raw_spinlock_t. Doing so
avoids some ugly nesting of linux/*.h and asm/*.h files. Those
particular locks are well tested and contained entirely inside arch
specific code. I do NOT expect any new issues to arise with them.

If someone does ever need to use debug/metrics with them, then they
will need to unravel this hairball between spinlocks, atomic ops, and
bit ops that exist only because parisc has exactly one atomic
instruction: LDCW (load and clear word).

thanks,
grant

Signed-off-by: Grant Grundler

--- linux/include/asm/processor.h-orig	20 May 2005 00:05:13 -0000
+++ linux/include/asm/processor.h	6 Jun 2005 16:11:45 -0000
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include
 #include
+#include
 #include
 #include

--- linux/arch/parisc/lib/bitops.c-orig	15 Aug 2004 14:17:39 -0000
+++ linux/arch/parisc/lib/bitops.c	6 Jun 2005 04:32:33 -0000
@@ -11,10 +11,10 @@
 #include
 #include
 #include

 #ifdef CONFIG_SMP
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif

--- linux/include/asm-parisc/atomic.h-orig	27 Aug 2004 17:38:21 -0000
+++ linux/include/asm-parisc/atomic.h	6 Jun 2005 04:32:36 -0000
@@ -24,19 +24,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

-/* Can't use _raw_spin_lock_irq because of #include problems, so
+/* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	spinlock_t *s = ATOMIC_HASH(l);		\
+	raw_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	_raw_spin_lock(s);			\
+	__raw_spin_lock(s);			\
 } while(0)

 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	spinlock_t *s = ATOMIC_HASH(l);			\
-	_raw_spin_unlock(s);				\
+	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)

--- linux/include/asm-parisc/bitops.h-orig	4 Jun 2005 05:42:21 -0000
+++ linux/include/asm-parisc/bitops.h	6 Jun 2005 04:32:36 -0000
@@ -3,7 +3,7 @@
 #include
 #include	/* for BITS_PER_LONG/SHIFT_PER_LONG */
-#include
+#include
 #include
 #include

--- linux/include/asm-parisc/cacheflush.h-orig	18 Mar 2005 13:17:43 -0000
+++ linux/include/asm-parisc/cacheflush.h	6 Jun 2005 04:32:36 -0000
@@ -3,6 +3,7 @@
 #include
 #include
+#include	/* for flush_user_dcache_range_asm() proto */

 /* The usual comment is "Caches aren't brain-dead on the ".
  * Unfortunately, that doesn't apply to PA-RISC. */

--- linux/include/asm-parisc/spinlock.h-orig	7 Mar 2005 15:05:48
+++ linux/include/asm-parisc/spinlock.h	6 Jun 2005 04:32:36 -0000
@@ -2,6 +2,8 @@
 #define __ASM_SPINLOCK_H

 #include
+#include
+#include

 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,