#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ     0
#define VERIFY_WRITE    1

#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a,b) ((a).seg == (b).seg)

#ifdef __CHECKER__
#define CHECK_UPTR(ptr) do {                            \
        __typeof__(*(ptr)) *__dummy_check_uptr =        \
                (void __user *)&__dummy_check_uptr;     \
} while(0)
#else
#define CHECK_UPTR(ptr)
#endif

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
        (((segment).seg & (addr | size | (addr+size))) == 0)

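/*
 * Worked example (illustrative, not part of the original header): with
 * USER_DS.seg == -0x40000000000UL == 0xfffffc0000000000, the test above
 * passes only when addr, size and addr+size all fit below 1UL << 42;
 * with KERNEL_DS.seg == 0 it always passes, so kernel mode bypasses the
 * check without a single branch.
 *
 *   __access_ok(0x0000020000001000UL, 0x100, USER_DS)  -> 1
 *   __access_ok(0xfffffc0000310000UL, 0x100, USER_DS)  -> 0  (kseg address)
 *   __access_ok(0x000003ffffffff00UL, 0x200, USER_DS)  -> 0  (addr+size
 *                                                             crosses the limit)
 */
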
#define access_ok(type,addr,size)                               \
({                                                              \
        CHECK_UPTR(addr);                                       \
        __access_ok(((unsigned long)(addr)),(size),get_fs());   \
})

extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
        return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

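/*
 * Usage sketch (illustrative only; example_get_pair is a hypothetical
 * helper, not part of this header): one access_ok() check up front,
 * then the unchecked __get_user() variants for the individual loads.
 */
#if 0
static long example_get_pair(int *a, int *b, const int __user *uptr)
{
        if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
                return -EFAULT;
        if (__get_user(*a, uptr))       /* nonzero only on a fault */
                return -EFAULT;
        return __get_user(*b, uptr + 1);
}
#endif
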
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __gu_err = 0, __gu_val;                            \
        CHECK_UPTR(ptr);                                        \
        switch (size) {                                         \
          case 1: __get_user_8(ptr); break;                     \
          case 2: __get_user_16(ptr); break;                    \
          case 4: __get_user_32(ptr); break;                    \
          case 8: __get_user_64(ptr); break;                    \
          default: __get_user_unknown(); break;                 \
        }                                                       \
        (x) = (__typeof__(*(ptr))) __gu_val;                    \
        __gu_err;                                               \
})

#define __get_user_check(x,ptr,size,segment)                    \
({                                                              \
        long __gu_err = -EFAULT, __gu_val = 0;                  \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);            \
        CHECK_UPTR(ptr);                                        \
        if (__access_ok((long)__gu_addr,size,segment)) {        \
                __gu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __get_user_8(__gu_addr); break;       \
                  case 2: __get_user_16(__gu_addr); break;      \
                  case 4: __get_user_32(__gu_addr); break;      \
                  case 8: __get_user_64(__gu_addr); break;      \
                  default: __get_user_unknown(); break;         \
                }                                               \
        }                                                       \
        (x) = (__typeof__(*(ptr))) __gu_val;                    \
        __gu_err;                                               \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_64(addr)                             \
        __asm__("1: ldq %0,%2\n"                        \
        "2:\n"                                          \
        ".section __ex_table,\"a\"\n"                   \
        "       .long 1b - .\n"                         \
        "       lda %0, 2b-1b(%1)\n"                    \
        ".previous"                                     \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)                             \
        __asm__("1: ldl %0,%2\n"                        \
        "2:\n"                                          \
        ".section __ex_table,\"a\"\n"                   \
        "       .long 1b - .\n"                         \
        "       lda %0, 2b-1b(%1)\n"                    \
        ".previous"                                     \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)                             \
        __asm__("1: ldwu %0,%2\n"                       \
        "2:\n"                                          \
        ".section __ex_table,\"a\"\n"                   \
        "       .long 1b - .\n"                         \
        "       lda %0, 2b-1b(%1)\n"                    \
        ".previous"                                     \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)                              \
        __asm__("1: ldbu %0,%2\n"                       \
        "2:\n"                                          \
        ".section __ex_table,\"a\"\n"                   \
        "       .long 1b - .\n"                         \
        "       lda %0, 2b-1b(%1)\n"                    \
        ".previous"                                     \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)                                             \
{                                                                       \
        long __gu_tmp;                                                  \
        __asm__("1: ldq_u %0,0(%3)\n"                                   \
        "2:     ldq_u %1,1(%3)\n"                                       \
        "       extwl %0,%3,%0\n"                                       \
        "       extwh %1,%3,%1\n"                                       \
        "       or %0,%1,%0\n"                                          \
        "3:\n"                                                          \
        ".section __ex_table,\"a\"\n"                                   \
        "       .long 1b - .\n"                                         \
        "       lda %0, 3b-1b(%2)\n"                                    \
        "       .long 2b - .\n"                                         \
        "       lda %0, 3b-2b(%2)\n"                                    \
        ".previous"                                                     \
                : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)      \
                : "r"(addr), "2"(__gu_err));                            \
}

#define __get_user_8(addr)                                              \
        __asm__("1: ldq_u %0,0(%2)\n"                                   \
        "       extbl %0,%2,%0\n"                                       \
        "2:\n"                                                          \
        ".section __ex_table,\"a\"\n"                                   \
        "       .long 1b - .\n"                                         \
        "       lda %0, 2b-1b(%1)\n"                                    \
        ".previous"                                                     \
                : "=&r"(__gu_val), "=r"(__gu_err)                       \
                : "r"(addr), "1"(__gu_err))
#endif

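/*
 * C sketch of what the non-BWX __get_user_16 sequence computes
 * (illustrative only, assuming an arbitrarily aligned address): two
 * aligned quadword loads bracket the halfword, then extwl/extwh pull
 * out the low and high pieces, which are ORed together.
 */
#if 0
static unsigned long example_unaligned_ldwu(unsigned long addr)
{
        unsigned long lo = *(unsigned long *)(addr & ~7UL);        /* ldq_u 0(addr) */
        unsigned long hi = *(unsigned long *)((addr + 1) & ~7UL);  /* ldq_u 1(addr) */
        unsigned int shift = (addr & 7) * 8;
        /* extwl: word bytes from lo; extwh: the spill-over from hi (if any) */
        return ((lo >> shift) | (shift ? hi << (64 - shift) : 0)) & 0xffff;
}
#endif
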
extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __pu_err = 0;                                      \
        CHECK_UPTR(ptr);                                        \
        switch (size) {                                         \
          case 1: __put_user_8(x,ptr); break;                   \
          case 2: __put_user_16(x,ptr); break;                  \
          case 4: __put_user_32(x,ptr); break;                  \
          case 8: __put_user_64(x,ptr); break;                  \
          default: __put_user_unknown(); break;                 \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_check(x,ptr,size,segment)                    \
({                                                              \
        long __pu_err = -EFAULT;                                \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                  \
        CHECK_UPTR(ptr);                                        \
        if (__access_ok((long)__pu_addr,size,segment)) {        \
                __pu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __put_user_8(x,__pu_addr); break;     \
                  case 2: __put_user_16(x,__pu_addr); break;    \
                  case 4: __put_user_32(x,__pu_addr); break;    \
                  case 8: __put_user_64(x,__pu_addr); break;    \
                  default: __put_user_unknown(); break;         \
                }                                               \
        }                                                       \
        __pu_err;                                               \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x,addr)                                   \
__asm__ __volatile__("1: stq %r2,%1\n"                          \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31,2b-1b(%0)\n"                            \
        ".previous"                                             \
                : "=r"(__pu_err)                                \
                : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr)                                   \
__asm__ __volatile__("1: stl %r2,%1\n"                          \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31,2b-1b(%0)\n"                            \
        ".previous"                                             \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x,addr)                                   \
__asm__ __volatile__("1: stw %r2,%1\n"                          \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31,2b-1b(%0)\n"                            \
        ".previous"                                             \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr)                                    \
__asm__ __volatile__("1: stb %r2,%1\n"                          \
        "2:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31,2b-1b(%0)\n"                            \
        ".previous"                                             \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x,addr)                                   \
{                                                               \
        long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;        \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %2,1(%5)\n"                               \
        "2:     ldq_u %1,0(%5)\n"                               \
        "       inswh %6,%5,%4\n"                               \
        "       inswl %6,%5,%3\n"                               \
        "       mskwh %2,%5,%2\n"                               \
        "       mskwl %1,%5,%1\n"                               \
        "       or %2,%4,%2\n"                                  \
        "       or %1,%3,%1\n"                                  \
        "3:     stq_u %2,1(%5)\n"                               \
        "4:     stq_u %1,0(%5)\n"                               \
        "5:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31, 5b-1b(%0)\n"                           \
        "       .long 2b - .\n"                                 \
        "       lda $31, 5b-2b(%0)\n"                           \
        "       .long 3b - .\n"                                 \
        "       lda $31, 5b-3b(%0)\n"                           \
        "       .long 4b - .\n"                                 \
        "       lda $31, 5b-4b(%0)\n"                           \
        ".previous"                                             \
                : "=r"(__pu_err), "=&r"(__pu_tmp1),             \
                  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),           \
                  "=&r"(__pu_tmp4)                              \
                : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x,addr)                                    \
{                                                               \
        long __pu_tmp1, __pu_tmp2;                              \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %1,0(%4)\n"                               \
        "       insbl %3,%4,%2\n"                               \
        "       mskbl %1,%4,%1\n"                               \
        "       or %1,%2,%1\n"                                  \
        "2:     stq_u %1,0(%4)\n"                               \
        "3:\n"                                                  \
        ".section __ex_table,\"a\"\n"                           \
        "       .long 1b - .\n"                                 \
        "       lda $31, 3b-1b(%0)\n"                           \
        "       .long 2b - .\n"                                 \
        "       lda $31, 3b-2b(%0)\n"                           \
        ".previous"                                             \
                : "=r"(__pu_err),                               \
                  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)            \
                : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized away by the linker.  */
#ifdef MODULE
#define __module_address(sym)           "r"(sym),
#define __module_call(ra, arg, sym)     "jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)     "bsr $" #ra "," #sym " !samegp"
#endif
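/*
 * Expansion sketch (illustrative): for __copy_user below, this pair
 * of macros emits
 *
 *      built-in:  bsr $28, __copy_user !samegp   (callee shares our GP)
 *      module:    jsr $28, (%3), __copy_user     (indirect call through a
 *                                                 register, so the callee
 *                                                 reloads its own GP)
 */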

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
        register void * __cu_to __asm__("$6") = to;
        register const void * __cu_from __asm__("$7") = from;
        register long __cu_len __asm__("$0") = len;

        __asm__ __volatile__(
                __module_call(28, 3, __copy_user)
                : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
                : __module_address(__copy_user)
                  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
                : "$1","$2","$3","$4","$5","$28","memory");

        return __cu_len;
}

extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
        if (__access_ok((long)validate, len, get_fs()))
                len = __copy_tofrom_user_nocheck(to, from, len);
        return len;
}

#define __copy_to_user(to,from,n)                               \
({                                                              \
        CHECK_UPTR(to);                                         \
        __copy_tofrom_user_nocheck((void *)(to),(from),(n));    \
})
#define __copy_from_user(to,from,n)                             \
({                                                              \
        CHECK_UPTR(from);                                       \
        __copy_tofrom_user_nocheck((to),(void *)(from),(n));    \
})

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
        return __copy_tofrom_user((void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
        return __copy_tofrom_user(to, (void *)from, n, from);
}

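/*
 * Usage sketch (example_fetch is a hypothetical helper, for illustration
 * only): note that a nonzero return from copy_from_user() is the number
 * of bytes left uncopied, not an errno, so callers convert it themselves.
 */
#if 0
static long example_fetch(void *kbuf, const void __user *ubuf, long n)
{
        if (copy_from_user(kbuf, ubuf, n))
                return -EFAULT;
        return 0;
}
#endif
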
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
        register void __user * __cl_to __asm__("$6") = to;
        register long __cl_len __asm__("$0") = len;
        __asm__ __volatile__(
                __module_call(28, 2, __do_clear_user)
                : "=r"(__cl_len), "=r"(__cl_to)
                : __module_address(__do_clear_user)
                  "0"(__cl_len), "1"(__cl_to)
                : "$1","$2","$3","$4","$5","$28","memory");
        return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
        if (__access_ok((long)to, len, get_fs()))
                len = __clear_user(to, len);
        return len;
}

#undef __module_address
#undef __module_call

/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);

extern inline long
strncpy_from_user(char *to, const char __user *from, long n)
{
        long ret = -EFAULT;
        if (__access_ok((long)from, 0, get_fs()))
                ret = __strncpy_from_user(to, from, n);
        return ret;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char __user *);

extern inline long strlen_user(const char __user *str)
{
        return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}

/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char __user *, long);

extern inline long strnlen_user(const char __user *str, long n)
{
        return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset to the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
        signed int insn;
        union exception_fixup {
                unsigned unit;
                struct {
                        signed int nextinsn : 16;
                        unsigned int errreg : 5;
                        unsigned int valreg : 5;
                } bits;
        } fixup;
};

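/*
 * Worked example (illustrative): __get_user_64 above emits its fixup as
 * "lda %0, 2b-1b(%1)".  Read as an Alpha MEM-format instruction, that
 * packs disp = 2b-1b = 4 (resume one insn past the fault) into nextinsn,
 * the __gu_err register number into errreg (rb), and the __gu_val
 * register number into valreg (ra), matching the bitfield layout above.
 */
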
/* Returns the new pc */
#define fixup_exception(map_reg, fixup, pc)                     \
({                                                              \
        if ((fixup)->fixup.bits.valreg != 31)                   \
                map_reg((fixup)->fixup.bits.valreg) = 0;        \
        if ((fixup)->fixup.bits.errreg != 31)                   \
                map_reg((fixup)->fixup.bits.errreg) = -EFAULT;  \
        (pc) + (fixup)->fixup.bits.nextinsn;                    \
})


#endif /* __ALPHA_UACCESS_H */