/*
 * From linux-2.6.git: include/asm-avr32/system.h
 * (branch 'vserver', manufactured by cvs2svn)
 */
1 /*
2  * Copyright (C) 2004-2006 Atmel Corporation
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8 #ifndef __ASM_AVR32_SYSTEM_H
9 #define __ASM_AVR32_SYSTEM_H
10
11 #include <linux/compiler.h>
12 #include <linux/types.h>
13
14 #include <asm/ptrace.h>
15 #include <asm/sysreg.h>
16
/*
 * xchg(ptr, x): atomically exchange *ptr with x; evaluates to the old
 * value of *ptr.  Only 32-bit objects are supported (see __xchg below).
 */
#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define nop() asm volatile("nop")

/* mb()/rmb() are compiler-only barriers: empty asm with a "memory"
 * clobber stops GCC reordering or caching memory accesses across them. */
#define mb()                    asm volatile("" : : : "memory")
#define rmb()                   mb()
/* wmb() additionally issues "sync 0" -- presumably flushes the CPU
 * write buffer; confirm against the AVR32 architecture manual. */
#define wmb()                   asm volatile("sync 0" : : : "memory")
/* No data-dependency reordering to guard against: a no-op here. */
#define read_barrier_depends()  do { } while(0)
#define set_mb(var, value)      do { var = value; mb(); } while(0)
27
/*
 * Help PathFinder and other Nexus-compliant debuggers keep track of
 * the current PID by emitting an Ownership Trace Message each time we
 * switch task.
 */
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
/* Write the outgoing task's PID, then the incoming one, to the OCD
 * PID debug register via __mtdr() (declared in <asm/ocd.h>). */
#define finish_arch_switch(prev)                        \
        do {                                            \
                __mtdr(DBGREG_PID, prev->pid);          \
                __mtdr(DBGREG_PID, current->pid);       \
        } while(0)
#endif
41
/*
 * switch_to(prev, next, last) should switch from task `prev' to task
 * `next'. `prev' will never be the same as `next'.
 *
 * We just delegate everything to the __switch_to assembly function,
 * which is implemented in arch/avr32/kernel/switch_to.S
 *
 * mb() tells GCC not to cache `current' across this call.
 */
struct cpu_context;
struct task_struct;
/* Returns the task that was running before the switch ("last"). */
extern struct task_struct *__switch_to(struct task_struct *,
                                       struct cpu_context *,
                                       struct cpu_context *);
/* NOTE(review): "&prev->thread.cpu_context + 1" passes a pointer one
 * past the end of prev's cpu_context -- presumably the asm saves
 * registers downward from there; confirm against switch_to.S. */
#define switch_to(prev, next, last)                                     \
        do {                                                            \
                last = __switch_to(prev, &prev->thread.cpu_context + 1, \
                                   &next->thread.cpu_context);          \
        } while (0)
61
#ifdef CONFIG_SMP
# error "The AVR32 port does not support SMP"
#else
/* Uniprocessor kernel: the smp_* barriers only need to restrain the
 * compiler, so they collapse to barrier() / no-ops. */
# define smp_mb()               barrier()
# define smp_rmb()              barrier()
# define smp_wmb()              barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif
70
71 #include <linux/irqflags.h>
72
/* Deliberately left undefined: referencing it makes an xchg() on an
 * unsupported operand size fail at link time. */
extern void __xchg_called_with_bad_pointer(void);

#ifdef __CHECKER__
/* Declare the AVR32 GCC builtin so sparse doesn't complain about it. */
extern unsigned long __builtin_xchg(void *ptr, unsigned long x);
#endif

/* 32-bit atomic exchange via the compiler's xchg builtin. */
#define xchg_u32(val, m) __builtin_xchg((void *)m, val)
80
/*
 * Size-dispatching helper behind the xchg() macro.  Exchange @x with
 * the 32-bit object at @ptr and return its previous value.  Any other
 * operand size provokes a link-time error via the undefined function
 * __xchg_called_with_bad_pointer().
 */
static inline unsigned long __xchg(unsigned long x,
                                       volatile void *ptr,
                                       int size)
{
        if (size == 4)
                return xchg_u32(x, ptr);

        /* Unsupported size: link error; the return value is never used. */
        __xchg_called_with_bad_pointer();
        return x;
}
93
/*
 * Atomically set *m to new if *m == old; return the value *m held
 * before the operation (equal to old exactly when the swap happened).
 *
 * Implemented with AVR32 locked load / conditional store: "ssrf 5"
 * sets status-register flag 5 and "stcond" only performs the store
 * while that flag is still set, so a failed store branches back to 1:
 * and the whole sequence retries.  NOTE(review): flag-5-is-the-lock-bit
 * semantics taken from the AVR32 architecture manual -- confirm.
 */
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                                          unsigned long new)
{
        __u32 ret;

        asm volatile(
                "1:     ssrf    5\n"
                "       ld.w    %[ret], %[m]\n"
                "       cp.w    %[ret], %[old]\n"
                "       brne    2f\n"                   /* mismatch: bail out */
                "       stcond  %[m], %[new]\n"
                "       brne    1b\n"                   /* store failed: retry */
                "2:\n"
                : [ret] "=&r"(ret), [m] "=m"(*m)
                : "m"(m), [old] "ir"(old), [new] "r"(new)
                : "memory", "cc");
        return ret;
}
112
/* 64-bit cmpxchg is not available on this 32-bit kernel: any use of
 * __cmpxchg_u64 resolves to this undefined function and fails at link
 * time with a self-explanatory symbol name. */
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

/* Advertise a native cmpxchg implementation to generic kernel code. */
#define __HAVE_ARCH_CMPXCHG 1
122
/*
 * Size-dispatching helper behind the cmpxchg() macro.  4-byte operands
 * go to __cmpxchg_u32; 8-byte ones resolve to a deliberately undefined
 * function (link error -- no 64-bit cmpxchg on this 32-bit port); any
 * other size trips __cmpxchg_called_with_bad_pointer() at link time.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        if (size == 4)
                return __cmpxchg_u32(ptr, old, new);
        if (size == 8)
                return __cmpxchg_u64(ptr, old, new);

        /* Unsupported size: link error; the return value is never used. */
        __cmpxchg_called_with_bad_pointer();
        return old;
}
136
/*
 * cmpxchg(ptr, old, new): atomically set *ptr to new if it currently
 * equals old; evaluates to the prior value of *ptr.  Operand-size
 * dispatch happens in __cmpxchg().
 */
#define cmpxchg(ptr, old, new)                                  \
        ((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), \
                                   (unsigned long)(new),        \
                                   sizeof(*(ptr))))
141
struct pt_regs;
extern void __die(const char *, struct pt_regs *, unsigned long,
                  const char *, const char *, unsigned long);
extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long,
                            const char *, const char *, unsigned long);

/* Fatal-error reporting wrappers: capture file, function and line at
 * the call site.  __FUNCTION__ is GCC's spelling of C99 __func__. */
#define die(msg, regs, err)                                     \
        __die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs, err)                                   \
        __die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)

/* Identity mapping: no extra stack alignment/randomization on AVR32. */
#define arch_align_stack(x)     (x)
154
155 #endif /* __ASM_AVR32_SYSTEM_H */