/* $Id: uaccess.h,v 1.35 2002/02/09 19:49:31 davem Exp $ */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/a.out.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })      /* har har har */

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b)  ((a).seg == (b).seg)

#define set_fs(val)                                                             \
do {                                                                            \
        set_thread_current_ds((val).seg);                                       \
        __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));        \
} while(0)

#define __user_ok(addr,size) 1
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) 1
#define access_ok(type,addr,size) 1

static inline int verify_area(int type, const void __user * addr, unsigned long size)
{
        return 0;
}
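
/*
 * A minimal sketch of the classic idiom these macros exist for
 * (hypothetical helper, not part of this header): save the current
 * "segment", widen it to KERNEL_DS so the user accessors may touch
 * kernel memory, and always restore it afterwards.
 */
static inline void __example_widen_segment(void)
{
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* %asi now selects the kernel's VM map */
        /* ... call code that takes __user pointers on a kernel buffer ... */
        set_fs(old_fs);         /* restore the caller's segment */
}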

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way to mark a range of potentially faulting
 * insns (like twenty ldd/std's with no intervening other instructions):
 * specify the address of the first insn in "insn" and 0 in "fixup",
 * and in the next exception_table_entry specify the last potentially
 * faulting insn + 1 in "insn" and the routine which should handle the
 * fault in "fixup".  That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
        unsigned insn, fixup;
};

/* Special extable search, which handles ranges.  Returns the fixup address. */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

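/*
 * A hedged sketch of a ranged entry pair (assembler pseudo-code; the
 * labels are hypothetical).  Twenty back-to-back loads share a single
 * fixup routine, which receives the index of the faulting insn in %g2:
 *
 *      .section __ex_table,#alloc
 *      .word   range_start, 0          ! first insn of the range, fixup = 0
 *      .word   range_last_plus_1, range_fixup ! last faulting insn + 1, handler
 *      .previous
 */
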
extern void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)

/* Cast the user address through a suitably large struct so gcc treats
 * the asm operand as an access to the whole object rather than just
 * its first word.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_nocheck_ret(data,addr,size,retval) ({ \
register int __foo __asm__ ("l1"); \
switch (size) { \
case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \
case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \
case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \
case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \
default: if (__put_user_bad()) return retval; break; \
} })

#define __put_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Put user asm, inline. */\n"                                 \
"1:\t"  "st"#size "a %1, [%2] %%asi\n\t"                                \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "b      2b\n\t"                                                 \
        " mov   %3, %0\n\n\t"                                           \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\t"                                             \
        ".previous\n\n\t"                                               \
       : "=r" (ret) : "r" (x), "r" (__m(addr)),                         \
         "i" (-EFAULT))

#define __put_user_asm_ret(x,size,addr,ret,foo)                         \
if (__builtin_constant_p(ret) && ret == -EFAULT)                        \
__asm__ __volatile__(                                                   \
        "/* Put user asm ret, inline. */\n"                             \
"1:\t"  "st"#size "a %1, [%2] %%asi\n\n\t"                              \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, __ret_efault\n\n\t"                                 \
        ".previous\n\n\t"                                               \
       : "=r" (foo) : "r" (x), "r" (__m(addr)));                        \
else                                                                    \
__asm__ __volatile__(                                                   \
        "/* Put user asm ret, inline. */\n"                             \
"1:\t"  "st"#size "a %1, [%2] %%asi\n\n\t"                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "ret\n\t"                                                       \
        " restore %%g0, %3, %%o0\n\n\t"                                 \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\n\t"                                               \
       : "=r" (foo) : "r" (x), "r" (__m(addr)),                         \
         "i" (ret))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)                                 \
__asm__ __volatile__(                                                   \
        "/* Get user asm, inline. */\n"                                 \
"1:\t"  "ld"#size "a [%2] %%asi, %1\n\t"                                \
        "clr    %0\n"                                                   \
"2:\n\n\t"                                                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "clr    %1\n\t"                                                 \
        "b      2b\n\t"                                                 \
        " mov   %3, %0\n\n\t"                                           \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=r" (ret), "=r" (x) : "r" (__m(addr)),                        \
         "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)                          \
if (__builtin_constant_p(retval) && retval == -EFAULT)                  \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b,__ret_efault\n\n\t"                                  \
        ".previous\n\t"                                                 \
       : "=r" (x) : "r" (__m(addr)));                                   \
else                                                                    \
__asm__ __volatile__(                                                   \
        "/* Get user asm ret, inline. */\n"                             \
"1:\t"  "ld"#size "a [%1] %%asi, %0\n\n\t"                              \
        ".section .fixup,#alloc,#execinstr\n\t"                         \
        ".align 4\n"                                                    \
"3:\n\t"                                                                \
        "ret\n\t"                                                       \
        " restore %%g0, %2, %%o0\n\n\t"                                 \
        ".previous\n\t"                                                 \
        ".section __ex_table,#alloc\n\t"                                \
        ".align 4\n\t"                                                  \
        ".word  1b, 3b\n\n\t"                                           \
        ".previous\n\t"                                                 \
       : "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

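/*
 * A minimal usage sketch (hypothetical helper, not part of this
 * header): get_user()/put_user() size the access from the pointer type
 * and evaluate to 0 on success or -EFAULT on a fault.
 */
static inline int __example_bump_word(int __user *uptr)
{
        int val;

        if (get_user(val, uptr))        /* expands to a 4-byte lduwa */
                return -EFAULT;
        return put_user(val + 1, uptr); /* expands to a 4-byte stwa */
}
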
extern unsigned long ___copy_from_user(void *to, const void __user *from,
                                       unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
                                          unsigned long size);
static inline unsigned long copy_from_user(void *to, const void __user *from,
                                           unsigned long size)
{
        unsigned long ret = ___copy_from_user(to, from, size);

        if (ret)
                ret = copy_from_user_fixup(to, from, size);
        return ret;
}
#define __copy_from_user copy_from_user

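/*
 * A hedged usage sketch (hypothetical helper, not part of this
 * header): copy_from_user() returns the number of bytes that could
 * NOT be copied, so zero means complete success.
 */
static inline int __example_fetch_args(void *kbuf, const void __user *ubuf,
                                       unsigned long len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;         /* part of the range faulted */
        return 0;
}
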
extern unsigned long ___copy_to_user(void __user *to, const void *from,
                                     unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
                                        unsigned long size);
static inline unsigned long copy_to_user(void __user *to, const void *from,
                                         unsigned long size)
{
        unsigned long ret = ___copy_to_user(to, from, size);

        if (ret)
                ret = copy_to_user_fixup(to, from, size);
        return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long ___copy_in_user(void __user *to, const void __user *from,
                                     unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
                                        unsigned long size);
static inline unsigned long copy_in_user(void __user *to, void __user *from,
                                         unsigned long size)
{
        unsigned long ret = ___copy_in_user(to, from, size);

        if (ret)
                ret = copy_in_user_fixup(to, from, size);
        return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __bzero_noasi(void __user *, unsigned long);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        return __bzero_noasi(addr, size);
}

#define clear_user __clear_user

extern long __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

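/*
 * One more hedged sketch (hypothetical helper): strncpy_from_user()
 * returns the number of bytes copied (or -EFAULT), so a short copy can
 * be detected and the tail of the kernel buffer cleared explicitly.
 */
static inline long __example_get_name(char *dst, const char __user *src,
                                      long len)
{
        long copied = strncpy_from_user(dst, src, len);

        if (copied < 0)                 /* fault while reading user memory */
                return copied;
        if (copied < len)
                memset(dst + copied, 0, len - copied);
        return copied;
}
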
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */