* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
*/
-
-/*
- * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
- * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
- * main big wrapper ...
- */
-#include <linux/config.h>
-#include <linux/spinlock.h>
-
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
#include <asm/cpu-features.h>
-#include <asm/interrupt.h>
#include <asm/war.h>
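
The new <linux/irqflags.h> and <asm/barrier.h> includes supply the local_irq_save()/local_irq_restore() pair and the smp_mb() used in the hunks below, which replace the explicit sync instructions removed from the LL/SC loops. The point is that smp_mb() only costs a sync where one is actually needed; a minimal sketch of the idea, assuming a CONFIG_WEAK_ORDERING-style gate (the exact macro bodies and config symbols in <asm/barrier.h> may differ):

/*
 * Sketch only, not the literal <asm/barrier.h> contents: smp_mb()
 * emits a real sync on SMP kernels for weakly ordered CPUs and
 * collapses to a compiler barrier everywhere else.
 */
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# define smp_mb()	__asm__ __volatile__("sync" : : : "memory")
#else
# define smp_mb()	barrier()	/* compiler barrier only */
#endif
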
typedef struct { volatile int counter; } atomic_t;
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" sc %0, %2 \n"
" beqz %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
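
For readers without the full file: once this hunk is applied, the 32-bit atomic_add_return() is bracketed by smp_mb() calls instead of carrying a sync inside each LL/SC loop. A sketch of the resulting function follows; the prologue, clobber list and fallback branches are reconstructed from the surrounding context rather than quoted from the patch, so details may differ from the actual source.

/* Sketch of atomic_add_return() after this change. */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long result;

	smp_mb();			/* barrier before the atomic op */

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	ll	%1, %2	# atomic_add_return	\n"
		"	addu	%0, %1, %3			\n"
		"	sc	%0, %2				\n"
		"	beqzl	%0, 1b				\n"
		"	addu	%0, %1, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		/* same loop with a plain beqz instead of beqzl */
	} else {
		unsigned long flags;

		local_irq_save(flags);
		result = v->counter;
		result += i;
		v->counter = result;
		local_irq_restore(flags);
	}

	smp_mb();			/* barrier after the atomic op */

	return result;
}

The same before/after smp_mb() pattern repeats in the remaining hunks for the sub, sub_if_positive and 64-bit variants.
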
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" sc %0, %2 \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
#ifdef CONFIG_64BIT
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" scd %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" scd %0, %2 \n"
" beqz %0, 1b \n"
" addu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" scd %0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
" scd %0, %2 \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
- " sync \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}
{
unsigned long result;
+ smp_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
" beqzl %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
" beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " sync \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
local_irq_restore(flags);
}
+ smp_mb();
+
return result;
}