#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

#include <linux/config.h>

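/*
 * ADDR re-casts the untyped bit-string pointer so it can be used
 * directly as a memory operand in the asm statements below.
 */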
#define ADDR (*(volatile long *) addr)

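/*
 * synch_set_bit / synch_clear_bit / synch_change_bit atomically set,
 * clear or complement bit 'nr' of the bit string at 'addr'.  The 'lock'
 * prefix makes each read-modify-write atomic with respect to all other
 * CPUs, which is what is needed when the bit string is shared with the
 * hypervisor or with other guests.
 */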
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

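/*
 * The test_and_* variants additionally return the previous value of the
 * bit.  btsl/btrl/btcl copy the old bit into the carry flag, and the
 * following "sbbl %0,%0" turns CF into 0 (bit was clear) or -1 (bit was
 * set), so the result is non-zero exactly when the old bit was set.
 */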
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

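/*
 * __synch_xchg_dummy exists only so that *__synch_xg(ptr) names a large
 * memory object in the cmpxchg asm below; this stops the compiler from
 * assuming that only a single word at 'ptr' is accessed and from caching
 * the target in a register across the operation.
 */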
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))

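/*
 * synch_cmpxchg(ptr, old, new) atomically compares *ptr with 'old' and,
 * if they are equal, stores 'new'; in all cases it returns the value
 * *ptr held before the operation.  The size switch in __synch_cmpxchg
 * selects the 1-, 2-, 4- (or, under CONFIG_X86_64, 8-) byte form of the
 * cmpxchg instruction; 'old' is passed in %eax ("0"(old)) and the
 * previous value comes back in %eax ("=a"(prev)), as cmpxchg requires.
 */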
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#ifdef CONFIG_X86_64
        case 4:
                __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
        case 8:
                __asm__ __volatile__("lock; cmpxchgq %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#else
        case 4:
                __asm__ __volatile__("lock; cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__synch_xg(ptr)),
                                       "0"(old)
                                     : "memory");
                return prev;
#endif
        }
        return old;
}

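/*
 * Plain (non-atomic) test of bit 'nr'.  When 'nr' is a compile-time
 * constant, the C version lets the compiler fold the test down to a
 * single immediate mask; otherwise the btl-based version is used.
 */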
static __always_inline int synch_const_test_bit(int nr,
                                                const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))

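/*
 * x86 can cmpxchg 8- and 16-bit quantities directly, so sub-word
 * compare-and-exchange needs no special handling and simply aliases
 * synch_cmpxchg.
 */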
#define synch_cmpxchg_subword synch_cmpxchg

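/*
 * Illustrative use only (not part of the original interface): a guest
 * might consume a pending-event bit shared with the hypervisor and act
 * on its previous state roughly like this:
 *
 *     if (synch_test_and_clear_bit(port, &shared->evtchn_pending[0]))
 *             process_event(port);
 *
 * where 'shared', 'port' and 'process_event' are placeholder names.
 */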
#endif /* __XEN_SYNCH_BITOPS_H__ */