linux 2.6.16.38 w/ vs2.0.3-rc1
[linux-2.6.git] / include/asm-i386/mutex.h
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)                           \
do {                                                                    \
        unsigned int dummy;                                             \
                                                                        \
        typecheck(atomic_t *, count);                                   \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   decl (%%eax)        \n"                     \
                        "   js 2f               \n"                     \
                        "1:                     \n"                     \
                                                                        \
                LOCK_SECTION_START("")                                  \
                        "2: call "#fail_fn"     \n"                     \
                        "   jmp 1b              \n"                     \
                LOCK_SECTION_END                                        \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
                : "memory", "ecx", "edx");                              \
} while (0)
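
/*
 * Illustration only (never compiled): a plain-C sketch of what the asm
 * fastpath above does, along the lines of the generic atomic-decrement
 * variant (cf. asm-generic/mutex-dec.h).  The helper name is made up for
 * the example; only the atomic_dec_return()/fail_fn() logic is the point.
 */
#if 0
static inline void
__mutex_fastpath_lock_sketch(atomic_t *count,
                             fastcall void (*fail_fn)(atomic_t *))
{
        /*
         * Decrement the count; if it drops below 0 the original value
         * was not 1 (the mutex was already locked), so take the slowpath.
         */
        if (unlikely(atomic_dec_return(count) < 0))
                fail_fn(count);
}
#endif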

/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
                             int fastcall (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else
                return 0;
}
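
/*
 * Illustration only (never compiled): roughly how the generic mutex core
 * in kernel/mutex.c is expected to use this helper for the interruptible
 * lock.  The slowpath name below is quoted from memory and should be
 * treated as an assumption, not part of this header.
 */
#if 0
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval(&lock->count,
                                            __mutex_lock_interruptible_slowpath);
}
#endif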

/**
 *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)                         \
do {                                                                    \
        unsigned int dummy;                                             \
                                                                        \
        typecheck(atomic_t *, count);                                   \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK    "   incl (%%eax)        \n"                     \
                        "   jle 2f              \n"                     \
                        "1:                     \n"                     \
                                                                        \
                LOCK_SECTION_START("")                                  \
                        "2: call "#fail_fn"     \n"                     \
                        "   jmp 1b              \n"                     \
                LOCK_SECTION_END                                        \
                                                                        \
                :"=a" (dummy)                                           \
                : "a" (count)                                           \
                : "memory", "ecx", "edx");                              \
} while (0)
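
/*
 * Illustration only (never compiled): the plain-C equivalent of the unlock
 * fastpath above, again along the lines of asm-generic/mutex-dec.h.  The
 * helper name is made up for the example.
 */
#if 0
static inline void
__mutex_fastpath_unlock_sketch(atomic_t *count,
                               fastcall void (*fail_fn)(atomic_t *))
{
        /*
         * Increment the count; if the result is still <= 0 there may be
         * waiters, so let the slowpath finish the unlock and wake them up.
         */
        if (unlikely(atomic_inc_return(count) <= 0))
                fail_fn(count);
}
#endif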

#define __mutex_slowpath_needs_to_unlock()      1
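/*
 * Note: the unlock fastpath above leaves the count at a value lower than 1
 * when it calls <fail_fn> (it only increments, it never writes 1), so per
 * the rule documented above the slowpath always has to perform the final
 * unlock itself, hence the constant 1.
 */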

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        /*
         * We have two variants here. The cmpxchg based one is the best one
         * because it never induces a false contention state.  It is included
         * here because architectures using the inc/dec algorithms over the
         * xchg ones are much more likely to support cmpxchg natively.
         *
         * If not, we fall back to the spinlock based variant - that is
         * just as efficient as (and simpler than) a 'destructive' probing
         * of the mutex state would be.
         */
#ifdef __HAVE_ARCH_CMPXCHG
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        return 0;
#else
        return fail_fn(count);
#endif
}
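
/*
 * Illustration only (never compiled): roughly how kernel/mutex.c is
 * expected to wire this up; __mutex_trylock_slowpath is the spinlock based
 * fallback mentioned above, and its exact name here should be treated as
 * an assumption.
 */
#if 0
int fastcall __sched mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
}
#endif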

#endif