#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
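/*
 * Illustrative sketch (not part of this header): the classic pattern
 * for pointing the user-access routines at a kernel buffer is to widen
 * the segment limit around the call and always restore it.  With
 * KERNEL_DS (seg == 0) the mask test in __access_ok() below passes for
 * any address.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = vfs_read(file, (char __user *)kbuf, count, &pos);
 *	set_fs(old_fs);
 */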
#ifdef __CHECKER__
#define CHECK_UPTR(ptr) do {				\
	__typeof__(*(ptr)) *__dummy_check_uptr =	\
		(void __user *)&__dummy_check_uptr;	\
} while (0)
#else
#define CHECK_UPTR(ptr)
#endif
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)
#define access_ok(type,addr,size)				\
({								\
	CHECK_UPTR(addr);					\
	__access_ok(((unsigned long)(addr)),(size),get_fs());	\
})
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
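/*
 * Worked example (illustrative, not part of this header): USER_DS.seg
 * is -0x40000000000 = 0xfffffc0000000000, i.e. all bits at or above
 * bit 42 set.  The check is three ORs and one AND, no comparisons:
 *
 *	seg  = 0xfffffc0000000000
 *	addr = 0x000003ff00000000, size = 0x1000
 *	seg & (addr | size | (addr+size)) == 0		-> valid
 *
 *	addr = 0xfffffc0000010000 (a kernel address)
 *	seg & (addr | size | (addr+size)) != 0		-> -EFAULT
 *
 * With KERNEL_DS, seg == 0 and the AND is always zero, so kernel-mode
 * accesses pass unconditionally.
 */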
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
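/*
 * Illustrative sketch (not part of this header): sizeof(*(ptr)) picks
 * the access width, and the macro result is 0 or -EFAULT.
 *
 *	static int bump_counter(int __user *uptr)
 *	{
 *		int v;
 *
 *		if (get_user(v, uptr))
 *			return -EFAULT;
 *		return put_user(v + 1, uptr);
 *	}
 */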
83 * The "__xxx" versions do not do address space checking, useful when
84 * doing multiple accesses to the same area (the programmer has to do the
85 * checks by hand with "access_ok()")
87 #define __put_user(x,ptr) \
88 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
89 #define __get_user(x,ptr) \
90 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
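/*
 * Illustrative sketch (not part of this header): do one access_ok()
 * for the whole range, then use the unchecked variants per element.
 *
 *	static int sum_user_ints(const int __user *uarr, long n, long *sum)
 *	{
 *		long i;
 *		int v;
 *
 *		if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *			return -EFAULT;
 *		for (*sum = 0, i = 0; i < n; i++) {
 *			if (__get_user(v, uarr + i))
 *				return -EFAULT;
 *			*sum += v;
 *		}
 *		return 0;
 *	}
 */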
93 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
94 * encode the bits we need for resolving the exception. See the
95 * more extensive comments with fixup_inline_exception below for
99 extern void __get_user_unknown(void);
#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err = 0, __gu_val;				\
	CHECK_UPTR(ptr);					\
	switch (size) {						\
	  case 1: __get_user_8(ptr); break;			\
	  case 2: __get_user_16(ptr); break;			\
	  case 4: __get_user_32(ptr); break;			\
	  case 8: __get_user_64(ptr); break;			\
	  default: __get_user_unknown(); break;			\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
#define __get_user_check(x,ptr,size,segment)			\
({								\
	long __gu_err = -EFAULT, __gu_val = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	CHECK_UPTR(ptr);					\
	if (__access_ok((long)__gu_addr,size,segment)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		  case 1: __get_user_8(__gu_addr); break;	\
		  case 2: __get_user_16(__gu_addr); break;	\
		  case 4: __get_user_32(__gu_addr); break;	\
		  case 8: __get_user_64(__gu_addr); break;	\
		  default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
/* Tell gcc that an access through __m(x) touches a large object at
   that address, so it won't cache the target memory in registers
   across the asm.  */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	" .long 1b - .\n"				\
	" lda %0, 2b-1b(%1)\n"				\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	" .long 1b - .\n"				\
	" lda %0, 2b-1b(%1)\n"				\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	" .long 1b - .\n"				\
	" lda %0, 2b-1b(%1)\n"				\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	" .long 1b - .\n"				\
	" lda %0, 2b-1b(%1)\n"				\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2: ldq_u %1,1(%3)\n"						\
	" extwl %0,%3,%0\n"						\
	" extwh %1,%3,%1\n"						\
	" or %0,%1,%0\n"						\
	"3:\n"								\
	".section __ex_table,\"a\"\n"					\
	" .long 1b - .\n"						\
	" lda %0, 3b-1b(%2)\n"						\
	" .long 2b - .\n"						\
	" lda %0, 3b-2b(%2)\n"						\
	".previous"							\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}
#define __get_user_8(addr)						\
	__asm__("1: ldq_u %0,0(%2)\n"					\
	" extbl %0,%2,%0\n"						\
	"2:\n"								\
	".section __ex_table,\"a\"\n"					\
	" .long 1b - .\n"						\
	" lda %0, 2b-1b(%1)\n"						\
	".previous"							\
		: "=&r"(__gu_val), "=r"(__gu_err)			\
		: "r"(addr), "1"(__gu_err))
#endif
extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)				\
({								\
	long __pu_err = 0;					\
	CHECK_UPTR(ptr);					\
	switch (size) {						\
	  case 1: __put_user_8(x,ptr); break;			\
	  case 2: __put_user_16(x,ptr); break;			\
	  case 4: __put_user_32(x,ptr); break;			\
	  case 8: __put_user_64(x,ptr); break;			\
	  default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})
#define __put_user_check(x,ptr,size,segment)			\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	CHECK_UPTR(ptr);					\
	if (__access_ok((long)__pu_addr,size,segment)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		  case 1: __put_user_8(x,__pu_addr); break;	\
		  case 2: __put_user_16(x,__pu_addr); break;	\
		  case 4: __put_user_32(x,__pu_addr); break;	\
		  case 8: __put_user_64(x,__pu_addr); break;	\
		  default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})
250 * The "__put_user_xx()" macros tell gcc they read from memory
251 * instead of writing: this is because they do not write to
252 * any memory gcc knows about, so there are no aliasing issues
#define __put_user_64(x,addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31,2b-1b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
#define __put_user_32(x,addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31,2b-1b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x,addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31,2b-1b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#define __put_user_8(x,addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31,2b-1b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */
#define __put_user_16(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	" inswh %6,%5,%4\n"					\
	" inswl %6,%5,%3\n"					\
	" mskwh %2,%5,%2\n"					\
	" mskwl %1,%5,%1\n"					\
	" or %2,%4,%2\n"					\
	" or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31, 5b-1b(%0)\n"					\
	" .long 2b - .\n"					\
	" lda $31, 5b-2b(%0)\n"					\
	" .long 3b - .\n"					\
	" lda $31, 5b-3b(%0)\n"					\
	" .long 4b - .\n"					\
	" lda $31, 5b-4b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}
#define __put_user_8(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	" insbl %3,%4,%2\n"					\
	" mskbl %1,%4,%1\n"					\
	" or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	" .long 1b - .\n"					\
	" lda $31, 3b-1b(%0)\n"					\
	" .long 2b - .\n"					\
	" lda $31, 3b-2b(%0)\n"					\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
/*
 * Complex access routines
 */
/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif
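/*
 * Illustrative expansion (not part of this header): in a module,
 * __module_call(28, 3, __copy_user) becomes "jsr $28,(%3),__copy_user"
 * with the symbol's address supplied by __module_address as an extra
 * "r" input, so the callee can derive its own GP; built into the
 * kernel it becomes "bsr $28,__copy_user !samegp", a direct branch
 * annotated so the linker knows no GP reload is needed.
 */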
extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
	if (__access_ok((long)validate, len, get_fs()))
		len = __copy_tofrom_user_nocheck(to, from, len);
	return len;
}
#define __copy_to_user(to,from,n)				\
({								\
	CHECK_UPTR(to);						\
	__copy_tofrom_user_nocheck((void *)(to),(from),(n));	\
})
#define __copy_from_user(to,from,n)				\
({								\
	CHECK_UPTR(from);					\
	__copy_tofrom_user_nocheck((to),(void *)(from),(n));	\
})
extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	return __copy_tofrom_user((void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	return __copy_tofrom_user(to, (void *)from, n, from);
}
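/*
 * Illustrative sketch (not part of this header): both routines return
 * the number of bytes that could not be copied, so zero means success.
 *
 *	static long read_flag(int __user *ubuf, const int *kflag)
 *	{
 *		if (copy_to_user(ubuf, kflag, sizeof(*kflag)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */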
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}
extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}
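/*
 * Illustrative sketch (not part of this header): clear_user() follows
 * the same convention, returning the number of bytes left unzeroed.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */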
#undef __module_address
#undef __module_call
/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
extern inline long
strncpy_from_user(char *to, const char __user *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}
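/*
 * Illustrative sketch (not part of this header), following the return
 * convention documented above; mapping a full buffer to -ENAMETOOLONG
 * is one choice a caller might make:
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (n < 0)
 *		return n;
 *	if (n == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * On success, name[] holds a NUL-terminated string of length n.
 */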
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char __user *);

extern inline long strlen_user(const char __user *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}
/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char __user *, long);

extern inline long strnlen_user(const char __user *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}
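/*
 * Illustrative sketch (not part of this header): note that failure is
 * signalled by 0 here, not by a negative errno.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */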
/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */
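/*
 * Worked example (illustrative, not part of this header): in
 * __get_user_32 above, suppose the compiler puts __gu_val in $1 and
 * __gu_err in $2.  The fixup "lda $1, 2b-1b($2)" then assembles with
 * Ra = 1 (valreg), Rb = 2 (errreg), and displacement 4 (nextinsn: one
 * instruction past the faulting ldl).  The word is never executed;
 * fixup_exception below merely decodes it.
 */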
struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};
/* Returns the new pc */
#define fixup_exception(map_reg, fixup, pc)			\
({								\
	if ((fixup)->fixup.bits.valreg != 31)			\
		map_reg((fixup)->fixup.bits.valreg) = 0;	\
	if ((fixup)->fixup.bits.errreg != 31)			\
		map_reg((fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (fixup)->fixup.bits.nextinsn;			\
})
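/*
 * Illustrative sketch (not part of this header) of how a trap handler
 * consumes the table; search_exception_tables() is the generic kernel
 * lookup, and map_reg would be something like alpha's una_reg macro
 * that maps a register number to its slot in the trap frame:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(regs->pc);
 *	if (fixup)
 *		regs->pc = fixup_exception(una_reg, fixup, regs->pc);
 */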

#endif /* __ALPHA_UACCESS_H */