author     David S. Miller <davem@kernel.bkbits.net>  2004-02-13 06:09:30 -0800
committer  David S. Miller <davem@kernel.bkbits.net>  2004-02-13 06:09:30 -0800
commit     5d8691683220533b2ae46b4b63057dd2b4405d94 (patch)
tree       25727330f3f5b3c230b79cac7bfb686899d22056
parent     d1c0dfc84a8d1a97db4bf3fc5f67c0fa6e6cd5ec (diff)
parent     6afb3c3238a4479b1a9d6221a1bd7fd85854a68d (diff)
Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6
into kernel.bkbits.net:/home/davem/sparc-2.6
-rw-r--r--  arch/sparc/kernel/irq.c          60
-rw-r--r--  arch/sparc/kernel/process.c       9
-rw-r--r--  arch/sparc/kernel/semaphore.c     8
-rw-r--r--  arch/sparc/kernel/smp.c           3
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c  12
-rw-r--r--  arch/sparc/lib/atomic.S          20
-rw-r--r--  include/asm-generic/local.h       2
-rw-r--r--  include/asm-i386/atomic.h        30
-rw-r--r--  include/asm-mips/atomic.h        36
-rw-r--r--  include/asm-sparc/atomic.h       97
-rw-r--r--  include/asm-sparc/dma-mapping.h  22
-rw-r--r--  include/asm-sparc/processor.h     1
-rw-r--r--  include/asm-sparc/semaphore.h    12
-rw-r--r--  include/asm-sparc/system.h       98
-rw-r--r--  include/asm-x86_64/atomic.h      30
15 files changed, 231 insertions(+), 209 deletions(-)
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 87378e7711eb..3c982da8bb17 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -52,6 +52,66 @@
/* Used to protect the IRQ action lists */
spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
+#ifdef CONFIG_SMP
+#define SMP_NOP2 "nop; nop;\n\t"
+#define SMP_NOP3 "nop; nop; nop;\n\t"
+#else
+#define SMP_NOP2
+#define SMP_NOP3
+#endif /* SMP */
+unsigned long __local_irq_save(void)
+{
+ unsigned long retval;
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ "rd %%psr, %0\n\t"
+ SMP_NOP3 /* Sun4m + Cypress + SMP bug */
+ "or %0, %2, %1\n\t"
+ "wr %1, 0, %%psr\n\t"
+ "nop; nop; nop\n"
+ : "=&r" (retval), "=r" (tmp)
+ : "i" (PSR_PIL)
+ : "memory");
+
+ return retval;
+}
+
+void local_irq_enable(void)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ "rd %%psr, %0\n\t"
+ SMP_NOP3 /* Sun4m + Cypress + SMP bug */
+ "andn %0, %1, %0\n\t"
+ "wr %0, 0, %%psr\n\t"
+ "nop; nop; nop\n"
+ : "=&r" (tmp)
+ : "i" (PSR_PIL)
+ : "memory");
+}
+
+void local_irq_restore(unsigned long old_psr)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+ "rd %%psr, %0\n\t"
+ "and %2, %1, %2\n\t"
+ SMP_NOP2 /* Sun4m + Cypress + SMP bug */
+ "andn %0, %1, %0\n\t"
+ "wr %0, %2, %%psr\n\t"
+ "nop; nop; nop\n"
+ : "=&r" (tmp)
+ : "i" (PSR_PIL), "r" (old_psr)
+ : "memory");
+}
+
+EXPORT_SYMBOL(__local_irq_save);
+EXPORT_SYMBOL(local_irq_enable);
+EXPORT_SYMBOL(local_irq_restore);
+
/*
* Dave Redman (djhr@tadpole.co.uk)
*
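
In C terms, the new out-of-line helper reads %psr, raises the processor interrupt level (PIL) field, writes it back, and returns the old value; the extra nops exist only to dodge the Sun4m + Cypress + SMP erratum noted in the comments. A semantic sketch, assuming hypothetical read_psr()/write_psr() accessors (the real implementation has to stay in assembly, as above):

    static unsigned long __local_irq_save_sketch(void)
    {
            unsigned long psr = read_psr();         /* rd %psr, %0 */

            write_psr(psr | PSR_PIL);               /* mask all maskable IRQs */
            return psr;                             /* caller hands this to local_irq_restore() */
    }
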
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index f7f20790ed9f..7e6c60d50775 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -148,11 +148,12 @@ extern char reboot_command [];
extern void (*prom_palette)(int);
+/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
- sti();
+ local_irq_enable();
mdelay(8);
- cli();
+ local_irq_disable();
if (!serial_console && prom_palette)
prom_palette (1);
prom_halt();
@@ -165,9 +166,9 @@ void machine_restart(char * cmd)
{
char *p;
- sti();
+ local_irq_enable();
mdelay(8);
- cli();
+ local_irq_disable();
p = strchr (reboot_command, '\n');
if (p) *p = 0;
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
index d8600da31861..5a8f3d176a8f 100644
--- a/arch/sparc/kernel/semaphore.c
+++ b/arch/sparc/kernel/semaphore.c
@@ -61,7 +61,7 @@ void __down(struct semaphore * sem)
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
@@ -101,7 +101,7 @@ int __down_interruptible(struct semaphore * sem)
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
+ atomic24_add(sleepers, &sem->count);
break;
}
@@ -111,7 +111,7 @@ int __down_interruptible(struct semaphore * sem)
* "-1" is because we're still hoping to get
* the lock.
*/
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
@@ -146,7 +146,7 @@ int __down_trylock(struct semaphore * sem)
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock.
*/
- if (!atomic_add_negative(sleepers, &sem->count))
+ if (!atomic24_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
spin_unlock_irqrestore(&semaphore_lock, flags);
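
For reference, the atomic24_add_negative() predicate used above is defined later in this diff (asm-sparc/atomic.h) as a sign test on the 24-bit sum:

    #define atomic24_add_negative(i, v)  (__atomic24_add((i), (v)) < 0)

so adding `sleepers - 1` folds the other waiters back into sem->count and checks whether the semaphore is still owned. Only the names change in this file; the semaphore path keeps the 24-bit scheme because its inline assembly (see asm-sparc/semaphore.h below) depends on the value/lock-byte packing.
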
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 1e4d1597ef6c..7dacedac5781 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -56,6 +56,9 @@ int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time = 0; /* XXX */
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+};
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
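
The `[0 ... (ATOMIC_HASH_SIZE-1)]` form is GCC's designated-range initializer: every slot of the array gets the same value. With ATOMIC_HASH_SIZE == 4 (per asm-sparc/atomic.h below) it expands to:

    spinlock_t __atomic_hash[4] = {
            SPIN_LOCK_UNLOCKED, SPIN_LOCK_UNLOCKED,
            SPIN_LOCK_UNLOCKED, SPIN_LOCK_UNLOCKED
    };

These four locks back the new spinlock-based 32-bit atomic_t operations on SMP.
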
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index c23185b9d6d3..80e007d6febc 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -86,8 +86,8 @@ extern int __divdi3(int, int);
extern void dump_thread(struct pt_regs *, struct user *);
/* Private functions with odd calling conventions. */
-extern void ___atomic_add(void);
-extern void ___atomic_sub(void);
+extern void ___atomic24_add(void);
+extern void ___atomic24_sub(void);
extern void ___set_bit(void);
extern void ___clear_bit(void);
extern void ___change_bit(void);
@@ -147,8 +147,8 @@ EXPORT_SYMBOL(sparc_valid_addr_bitmap);
EXPORT_SYMBOL(phys_base);
/* Atomic operations. */
-EXPORT_SYMBOL(___atomic_add);
-EXPORT_SYMBOL(___atomic_sub);
+EXPORT_SYMBOL(___atomic24_add);
+EXPORT_SYMBOL(___atomic24_sub);
/* Bit operations. */
EXPORT_SYMBOL(___set_bit);
@@ -159,10 +159,6 @@ EXPORT_SYMBOL(___change_bit);
/* IRQ implementation. */
EXPORT_SYMBOL(global_irq_holder);
EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
/* Misc SMP information */
EXPORT_SYMBOL(__cpu_number_map);
diff --git a/arch/sparc/lib/atomic.S b/arch/sparc/lib/atomic.S
index 31da5750b5ab..98362d39cc25 100644
--- a/arch/sparc/lib/atomic.S
+++ b/arch/sparc/lib/atomic.S
@@ -45,8 +45,8 @@ ___xchg32_sun4md:
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
* Really, some things here for SMP are overly clever, go read the header.
*/
- .globl ___atomic_add
-___atomic_add:
+ .globl ___atomic24_add
+___atomic24_add:
rd %psr, %g3 ! Keep the code small, old way was stupid
nop; nop; nop; ! Let the bits set
or %g3, PSR_PIL, %g7 ! Disable interrupts
@@ -56,13 +56,13 @@ ___atomic_add:
1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 1b ! Nope...
- ld [%g1], %g7 ! Load locked atomic_t
+ ld [%g1], %g7 ! Load locked atomic24_t
sra %g7, 8, %g7 ! Get signed 24-bit integer
add %g7, %g2, %g2 ! Add in argument
- sll %g2, 8, %g7 ! Transpose back to atomic_t
+ sll %g2, 8, %g7 ! Transpose back to atomic24_t
st %g7, [%g1] ! Clever: This releases the lock as well.
#else
- ld [%g1], %g7 ! Load locked atomic_t
+ ld [%g1], %g7 ! Load locked atomic24_t
add %g7, %g2, %g2 ! Add in argument
st %g2, [%g1] ! Store it back
#endif
@@ -71,8 +71,8 @@ ___atomic_add:
jmpl %o7, %g0 ! NOTE: not + 8, see callers in atomic.h
mov %g4, %o7 ! Restore %o7
- .globl ___atomic_sub
-___atomic_sub:
+ .globl ___atomic24_sub
+___atomic24_sub:
rd %psr, %g3 ! Keep the code small, old way was stupid
nop; nop; nop; ! Let the bits set
or %g3, PSR_PIL, %g7 ! Disable interrupts
@@ -82,13 +82,13 @@ ___atomic_sub:
1: ldstub [%g1 + 3], %g7 ! Spin on the byte lock for SMP.
orcc %g7, 0x0, %g0 ! Did we get it?
bne 1b ! Nope...
- ld [%g1], %g7 ! Load locked atomic_t
+ ld [%g1], %g7 ! Load locked atomic24_t
sra %g7, 8, %g7 ! Get signed 24-bit integer
sub %g7, %g2, %g2 ! Subtract argument
- sll %g2, 8, %g7 ! Transpose back to atomic_t
+ sll %g2, 8, %g7 ! Transpose back to atomic24_t
st %g7, [%g1] ! Clever: This releases the lock as well
#else
- ld [%g1], %g7 ! Load locked atomic_t
+ ld [%g1], %g7 ! Load locked atomic24_t
sub %g7, %g2, %g2 ! Subtract argument
st %g2, [%g1] ! Store it back
#endif
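
What the two stubs do, re-expressed as a hedged C sketch of the SMP case: on big-endian sparc32, byte offset 3 is the least significant byte of the word, so the ldstub on it doubles as a spin lock while the signed 24-bit value lives in bits 31..8. Storing the shifted result writes zero back into that byte, which is why the final st releases the lock:

    /* Sketch only; the real code must remain the assembly above,
     * with interrupts raised to PSR_PIL around the whole sequence. */
    static int atomic24_add_sketch(int i, volatile int *word)
    {
            int v;

            /* SMP: ldstub [word + 3] spins until the lock byte is ours */
            v = *word >> 8;         /* sra: recover the signed 24-bit value */
            v += i;
            *word = v << 8;         /* sll + st: low byte becomes 0, lock released */
            return v;
    }

The `mov %o7, %g4` / `add %o7, 8, %o7` dance in the callers (see asm-sparc/atomic.h and semaphore.h) saves and restores the caller's %o7, so these stubs can be reached without a register window; that is the "odd calling convention" sparc_ksyms.c mentions.
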
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index 45c07dbc0a59..c814b2f840ba 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -9,7 +9,7 @@
/* An unsigned long type for operations which are atomic for a single
* CPU. Usually used in combination with per-cpu variables. */
-#if BITS_PER_LONG == 32 && !defined(CONFIG_SPARC32)
+#if BITS_PER_LONG == 32
/* Implement in terms of atomics. */
/* Don't use typedef: don't want them to be mixed with atomic_t's. */
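
The !defined(CONFIG_SPARC32) escape hatch can go because, with this merge, sparc32 gains full 32-bit atomic_t operations (backed by the hashed spinlocks above), so the generic 32-bit local_t implementation now works there as well.
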
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 61a1aece830b..4df45c5e4b16 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -27,8 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
@@ -37,8 +36,7 @@ typedef struct { volatile int counter; } atomic_t;
* @v: pointer of type atomic_t
* @i: required value
*
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) (((v)->counter) = (i))
@@ -47,8 +45,7 @@ typedef struct { volatile int counter; } atomic_t;
* @i: integer value to add
* @v: pointer of type atomic_t
*
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
*/
static __inline__ void atomic_add(int i, atomic_t *v)
{
@@ -63,8 +60,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
*/
static __inline__ void atomic_sub(int i, atomic_t *v)
{
@@ -81,8 +77,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
@@ -99,8 +94,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
*/
static __inline__ void atomic_inc(atomic_t *v)
{
@@ -114,8 +108,7 @@ static __inline__ void atomic_inc(atomic_t *v)
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
*/
static __inline__ void atomic_dec(atomic_t *v)
{
@@ -131,8 +124,7 @@ static __inline__ void atomic_dec(atomic_t *v)
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
*/
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
@@ -151,8 +143,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
@@ -172,8 +163,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
*/
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
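
These i386 hunks are documentation-only: with sparc32 no longer limiting atomic_t to 24 useful bits, the portable guarantee widens to the full int range. For example, a counter such as

    static atomic_t bytes_seen = ATOMIC_INIT(0);

    static void account(void)
    {
            atomic_add(1 << 25, &bytes_seen);   /* beyond the old 24-bit guarantee */
    }

is now within the documented range everywhere. The identical caveat is dropped from the mips and x86_64 headers below.
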
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index ccecd9767cb0..69d676ddb1ab 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -29,8 +29,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
@@ -46,8 +45,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
* @v: pointer of type atomic_t
* @i: required value
*
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) ((v)->counter = (i))
@@ -68,8 +66,7 @@ typedef struct { volatile __s64 counter; } atomic64_t;
* @i: integer value to add
* @v: pointer of type atomic_t
*
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
*/
static __inline__ void atomic_add(int i, atomic_t * v)
{
@@ -85,8 +82,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
*/
static __inline__ void atomic_sub(int i, atomic_t * v)
{
@@ -137,8 +133,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
* @i: integer value to add
* @v: pointer of type atomic_t
*
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
*/
static __inline__ void atomic_add(int i, atomic_t * v)
{
@@ -158,8 +153,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
*/
static __inline__ void atomic_sub(int i, atomic_t * v)
{
@@ -390,8 +384,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
@@ -412,8 +405,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -433,8 +425,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
*/
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
@@ -452,8 +443,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1,(v))
@@ -469,8 +459,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
* atomic_dec - decrement and test
* @v: pointer of type atomic_t
*
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1,(v))
@@ -489,8 +478,7 @@ static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
*/
#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index f42ba9526265..873f806fc678 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -2,21 +2,82 @@
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ *
+ * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
+ * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/config.h>
+#include <linux/spinlock.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
-#ifndef CONFIG_SMP
+
+#ifdef CONFIG_SMP
+
+#define ATOMIC_HASH_SIZE 4
+#define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
+
+#else /* SMP */
+
+#define ATOMIC_HASH_SIZE 1
+#define ATOMIC_HASH(a) 0
+
+#endif /* SMP */
+
+static inline int __atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ unsigned long flags;
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+ ret = (v->counter += i);
+
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+ return ret;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ unsigned long flags;
+ spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+ v->counter = i;
+
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
#define ATOMIC_INIT(i) { (i) }
+
#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+
+#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
+#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
+
+#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
+#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
+#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
+
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+
+/* This is the old 24-bit implementation. It's still used internally
+ * by some sparc-specific code, notably the semaphore implementation.
+ */
+typedef struct { volatile int counter; } atomic24_t;
+
+#ifndef CONFIG_SMP
+
+#define ATOMIC24_INIT(i) { (i) }
+#define atomic24_read(v) ((v)->counter)
+#define atomic24_set(v, i) (((v)->counter) = i)
#else
/* We do the bulk of the actual work out of line in two common
@@ -33,9 +94,9 @@ typedef struct { volatile int counter; } atomic_t;
 *   [ 24-bit signed value ][ lock byte ]
 *    31                  8  7          0
*/
-#define ATOMIC_INIT(i) { ((i) << 8) }
+#define ATOMIC24_INIT(i) { ((i) << 8) }
-static __inline__ int atomic_read(const atomic_t *v)
+static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
@@ -45,10 +106,10 @@ static __inline__ int atomic_read(const atomic_t *v)
return ret >> 8;
}
-#define atomic_set(v, i) (((v)->counter) = ((i) << 8))
+#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif
-static inline int __atomic_add(int i, atomic_t *v)
+static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
@@ -61,7 +122,7 @@ static inline int __atomic_add(int i, atomic_t *v)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_add\n\t"
+ "call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
@@ -70,7 +131,7 @@ static inline int __atomic_add(int i, atomic_t *v)
return increment;
}
-static inline int __atomic_sub(int i, atomic_t *v)
+static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
@@ -83,7 +144,7 @@ static inline int __atomic_sub(int i, atomic_t *v)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_sub\n\t"
+ "call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
@@ -92,19 +153,19 @@ static inline int __atomic_sub(int i, atomic_t *v)
return increment;
}
-#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
-#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))
+#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
+#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
-#define atomic_dec_return(v) __atomic_sub(1, (v))
-#define atomic_inc_return(v) __atomic_add(1, (v))
+#define atomic24_dec_return(v) __atomic24_sub(1, (v))
+#define atomic24_inc_return(v) __atomic24_add(1, (v))
-#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
-#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)
+#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
+#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
-#define atomic_inc(v) ((void)__atomic_add(1, (v)))
-#define atomic_dec(v) ((void)__atomic_sub(1, (v)))
+#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
+#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
-#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)
+#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
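
Taken together: every 32-bit atomic operation on sparc32 now funnels through __atomic_add_return(), serialized by one of the four locks chosen from address bits 9..8 of the atomic_t (the ATOMIC_HASH() macro above). A caller-side sketch of the resulting API:

    static atomic_t refcnt = ATOMIC_INIT(1);

    static void put_ref(void)
    {
            if (atomic_dec_and_test(&refcnt))   /* hashed-lock add returned 0 */
                    /* last reference gone; free the object here */ ;
    }

Unrelated atomics that hash to the same bucket merely contend on that lock; correctness never depends on which bucket is chosen.
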
diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
index 779cfef77d00..2dc5bb8effa6 100644
--- a/include/asm-sparc/dma-mapping.h
+++ b/include/asm-sparc/dma-mapping.h
@@ -1,5 +1,25 @@
+#ifndef _ASM_SPARC_DMA_MAPPING_H
+#define _ASM_SPARC_DMA_MAPPING_H
+
#include <linux/config.h>
#ifdef CONFIG_PCI
#include <asm-generic/dma-mapping.h>
-#endif
+#else
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag)
+{
+ BUG();
+ return NULL;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ BUG();
+}
+
+#endif /* PCI */
+
+#endif /* _ASM_SPARC_DMA_MAPPING_H */
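
Besides gaining the missing _ASM_SPARC_DMA_MAPPING_H include guard, the header now gives CONFIG_PCI=n kernels compilable stubs: generic code that references the DMA API still builds, and any actual call traps immediately via BUG().
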
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index b0c5a0d09204..0a9a4f5bc585 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -22,7 +22,6 @@
#include <asm/segment.h>
#include <asm/btfixup.h>
#include <asm/page.h>
-#include <asm/atomic.h>
/*
* Bus types
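
Dropping asm/atomic.h here most likely breaks a header cycle: asm-sparc/atomic.h now pulls in linux/spinlock.h, which itself depends on processor definitions, so processor.h can no longer include it safely.
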
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 0e6122ae3058..b3b16d121ace 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -10,7 +10,7 @@
#include <linux/rwsem.h>
struct semaphore {
- atomic_t count;
+ atomic24_t count;
int sleepers;
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
@@ -40,7 +40,7 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
- atomic_set(&sem->count, val);
+ atomic24_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
@@ -78,7 +78,7 @@ static inline void down(struct semaphore * sem)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_sub\n\t"
+ "call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n\t"
"tst %%g2\n\t"
"bl 2f\n\t"
@@ -115,7 +115,7 @@ static inline int down_interruptible(struct semaphore * sem)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_sub\n\t"
+ "call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n\t"
"tst %%g2\n\t"
"bl 2f\n\t"
@@ -154,7 +154,7 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_sub\n\t"
+ "call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n\t"
"tst %%g2\n\t"
"bl 2f\n\t"
@@ -193,7 +193,7 @@ static inline void up(struct semaphore * sem)
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
- "call ___atomic_add\n\t"
+ "call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n\t"
"tst %%g2\n\t"
"ble 2f\n\t"
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index b53cf2c6897e..a4c7d566e075 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -171,32 +171,11 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
/*
* Changing the IRQ level on the Sparc.
*/
-extern __inline__ void setipl(unsigned long __orig_psr)
-{
- __asm__ __volatile__(
- "wr %0, 0x0, %%psr\n\t"
- "nop; nop; nop\n"
- : /* no outputs */
- : "r" (__orig_psr)
- : "memory", "cc");
-}
+extern void local_irq_restore(unsigned long);
+extern unsigned long __local_irq_save(void);
+extern void local_irq_enable(void);
-extern __inline__ void local_irq_enable(void)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
- "rd %%psr, %0\n\t"
- "nop; nop; nop;\n\t" /* Sun4m + Cypress + SMP bug */
- "andn %0, %1, %0\n\t"
- "wr %0, 0x0, %%psr\n\t"
- "nop; nop; nop\n"
- : "=r" (tmp)
- : "i" (PSR_PIL)
- : "memory");
-}
-
-extern __inline__ unsigned long getipl(void)
+static inline unsigned long getipl(void)
{
unsigned long retval;
@@ -204,76 +183,11 @@ extern __inline__ unsigned long getipl(void)
return retval;
}
-#if 0 /* not used */
-extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
-{
- unsigned long retval;
-
- __asm__ __volatile__(
- "rd %%psr, %0\n\t"
- "nop; nop; nop;\n\t" /* Sun4m + Cypress + SMP bug */
- "and %0, %2, %%g1\n\t"
- "and %1, %2, %%g2\n\t"
- "xorcc %%g1, %%g2, %%g0\n\t"
- "be 1f\n\t"
- " nop\n\t"
- "wr %0, %2, %%psr\n\t"
- "nop; nop; nop;\n"
- "1:\n"
- : "=&r" (retval)
- : "r" (__new_psr), "i" (PSR_PIL)
- : "g1", "g2", "memory", "cc");
-
- return retval;
-}
-#endif
-
-extern __inline__ unsigned long read_psr_and_cli(void)
-{
- unsigned long retval;
-
- __asm__ __volatile__(
- "rd %%psr, %0\n\t"
- "nop; nop; nop;\n\t" /* Sun4m + Cypress + SMP bug */
- "or %0, %1, %%g1\n\t"
- "wr %%g1, 0x0, %%psr\n\t"
- "nop; nop; nop\n\t"
- : "=r" (retval)
- : "i" (PSR_PIL)
- : "g1", "memory");
-
- return retval;
-}
-
#define local_save_flags(flags) ((flags) = getipl())
-#define local_irq_save(flags) ((flags) = read_psr_and_cli())
-#define local_irq_restore(flags) setipl((flags))
-#define local_irq_disable() ((void) read_psr_and_cli())
-
+#define local_irq_save(flags) ((flags) = __local_irq_save())
+#define local_irq_disable() ((void) __local_irq_save())
#define irqs_disabled() ((getipl() & PSR_PIL) != 0)
-#ifdef CONFIG_SMP
-
-extern unsigned char global_irq_holder;
-
-#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long flags);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(flags) ((flags)=__global_save_flags())
-#define restore_flags(flags) __global_restore_flags(flags)
-
-#else
-
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-
-#endif
-
/* XXX Change this if we ever use a PSO mode kernel. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
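
Caller-visible behavior is unchanged; the primitives simply move out of line (into arch/sparc/kernel/irq.c above) so that one shared copy carries the Cypress-bug nops. A minimal caller-side sketch of the rewired macros:

    static void guarded_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* flags = __local_irq_save(): PIL raised */
            /* ... work that must not race with local interrupts ... */
            local_irq_restore(flags);       /* merges the saved PIL bits back into %psr */
    }

The SMP-global cli()/sti() variants are removed outright, matching the export removals in sparc_ksyms.c above.
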
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index fb8d4f54d3d0..baf472fb50f2 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -29,8 +29,7 @@ typedef struct { volatile int counter; } atomic_t;
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
@@ -39,8 +38,7 @@ typedef struct { volatile int counter; } atomic_t;
* @v: pointer of type atomic_t
* @i: required value
*
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) (((v)->counter) = (i))
@@ -49,8 +47,7 @@ typedef struct { volatile int counter; } atomic_t;
* @i: integer value to add
* @v: pointer of type atomic_t
*
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
+ * Atomically adds @i to @v.
*/
static __inline__ void atomic_add(int i, atomic_t *v)
{
@@ -65,8 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically subtracts @i from @v.
*/
static __inline__ void atomic_sub(int i, atomic_t *v)
{
@@ -83,8 +79,7 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
@@ -101,8 +96,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically increments @v by 1.
*/
static __inline__ void atomic_inc(atomic_t *v)
{
@@ -116,8 +110,7 @@ static __inline__ void atomic_inc(atomic_t *v)
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * Atomically decrements @v by 1.
*/
static __inline__ void atomic_dec(atomic_t *v)
{
@@ -133,8 +126,7 @@ static __inline__ void atomic_dec(atomic_t *v)
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * cases.
*/
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
@@ -153,8 +145,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * other cases.
*/
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
@@ -174,8 +165,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
+ * result is greater than or equal to zero.
*/
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{