git://git.onelab.eu
/
linux-2.6.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
vserver 1.9.5.x5
[linux-2.6.git]
/
include
/
asm-ia64
/
spinlock.h
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 28ba913..909936f 100644
(file)
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -19,6 +19,9 @@ typedef struct {
volatile unsigned int lock;
typedef struct {
volatile unsigned int lock;
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
@@ -32,10 +35,10 @@ typedef struct {
* carefully coded to touch only those registers that spin_lock() marks "clobbered".
*/
* carefully coded to touch only those registers that spin_lock() marks "clobbered".
*/
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
static inline void
static inline void
-_raw_spin_lock (spinlock_t *lock)
+_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
register volatile unsigned int *ptr asm ("r31") = &lock->lock;
{
register volatile unsigned int *ptr asm ("r31") = &lock->lock;
@@ -50,9 +53,10 @@ _raw_spin_lock (spinlock_t *lock)
"cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
"movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
"cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
"movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n"
+ "mov b6 = r29;;\n\t"
+ "mov r27=%2\n\t"
"(p14) br.cond.spnt.many b6"
"(p14) br.cond.spnt.many b6"
- : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
asm volatile ("{\n\t"
" mov ar.ccv = r0\n\t"
# else
asm volatile ("{\n\t"
" mov ar.ccv = r0\n\t"
@@ -60,33 +64,38 @@ _raw_spin_lock (spinlock_t *lock)
" mov r30 = 1;;\n\t"
"}\n\t"
"cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
" mov r30 = 1;;\n\t"
"}\n\t"
"cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n"
+ "cmp4.ne p14, p0 = r30, r0\n\t"
+ "mov r27=%2\n\t"
"(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
"(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
- : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
/* don't use brl on Itanium... */
/* mis-declare, so we get the entry-point, not it's function descriptor: */
asm volatile ("mov r30 = 1\n\t"
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
/* don't use brl on Itanium... */
/* mis-declare, so we get the entry-point, not it's function descriptor: */
asm volatile ("mov r30 = 1\n\t"
+ "mov r27=%2\n\t"
"mov ar.ccv = r0;;\n\t"
"cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
"movl r29 = ia64_spinlock_contention;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
"mov ar.ccv = r0;;\n\t"
"cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
"movl r29 = ia64_spinlock_contention;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n"
+ "mov b6 = r29;;\n
\t
"
"(p14) br.call.spnt.many b6 = b6"
"(p14) br.call.spnt.many b6 = b6"
- : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
asm volatile ("mov r30 = 1\n\t"
# else
asm volatile ("mov r30 = 1\n\t"
+ "mov r27=%2\n\t"
"mov ar.ccv = r0;;\n\t"
"cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
"(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
"mov ar.ccv = r0;;\n\t"
"cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
"cmp4.ne p14, p0 = r30, r0\n\t"
"(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
- : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
}
# endif /* CONFIG_MCKINLEY */
#endif
}
+#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
#else /* !ASM_SUPPORTED */
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
# define _raw_spin_lock(x) \
do { \
__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
# define _raw_spin_lock(x) \
do { \
__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -108,13 +117,17 @@ do {							\
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
typedef struct {
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
typedef struct {
- volatile int read_counter : 31;
- volatile int write_lock : 1;
+ volatile unsigned int read_counter : 31;
+ volatile unsigned int write_lock : 1;
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define rwlock_is_locked(x) (*(volatile int *) (x) != 0)
+#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
#define _raw_read_lock(rw) \
do { \
#define _raw_read_lock(rw) \
do { \
@@ -184,6 +197,8 @@ do {								\
#endif /* !ASM_SUPPORTED */
#endif /* !ASM_SUPPORTED */
+#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+
#define _raw_write_unlock(x) \
({ \
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
#define _raw_write_unlock(x) \
({ \
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \