#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

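/*
 * Worked example (illustrative numbers, not part of the API):
 * USER_DS is -0x40000000000 = 0xfffffc0000000000, i.e. bits 42..63.
 * For addr = 0x120001000 and size = 0x100:
 *
 *	0xfffffc0000000000 & (0x120001000 | 0x100 | 0x120001100) == 0
 *
 * so the access is allowed.  Under KERNEL_DS the mask is 0 and every
 * address passes, which is how set_fs(KERNEL_DS) bypasses checking.
 */
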
#define access_ok(type,addr,size)				\
({								\
	__chk_user_ptr(addr);					\
	__access_ok(((unsigned long)(addr)),(size),get_fs());	\
})

extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

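/*
 * Typical use (a sketch; `uptr' and `struct foo' are hypothetical):
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(struct foo)))
 *		return -EFAULT;
 *
 * verify_area() is the older equivalent, returning 0 or -EFAULT
 * directly.
 */
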
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())

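/*
 * Example (a sketch; `uptr' is a hypothetical int __user pointer):
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 *
 * Both return 0 on success and -EFAULT on a bad address.
 */
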
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

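/*
 * Example (a sketch): one access_ok() check amortized over several
 * unchecked accesses to the same user structure (`up', `a' and `b'
 * are hypothetical):
 *
 *	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
 *		return -EFAULT;
 *	err = __put_user(a, &up->a);
 *	err |= __put_user(b, &up->b);
 */
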
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err = 0;				\
	unsigned long __gu_val;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	  case 1: __get_user_8(ptr); break;		\
	  case 2: __get_user_16(ptr); break;		\
	  case 4: __get_user_32(ptr); break;		\
	  case 8: __get_user_64(ptr); break;		\
	  default: __get_user_unknown(); break;		\
	}						\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})

#define __get_user_check(x,ptr,size,segment)				\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (__access_ok((unsigned long)__gu_addr,size,segment)) {	\
		__gu_err = 0;						\
		switch (size) {						\
		  case 1: __get_user_8(__gu_addr); break;		\
		  case 2: __get_user_16(__gu_addr); break;		\
		  case 4: __get_user_32(__gu_addr); break;		\
		  case 8: __get_user_64(__gu_addr); break;		\
		  default: __get_user_unknown(); break;			\
		}							\
	}								\
	(x) = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2:	ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	".section __ex_table,\"a\"\n"					\
	"	.long 1b - .\n"						\
	"	lda %0, 3b-1b(%2)\n"					\
	"	.long 2b - .\n"						\
	"	lda %0, 3b-2b(%2)\n"					\
	".previous"							\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)						\
	__asm__("1: ldq_u %0,0(%2)\n"					\
	"	extbl %0,%2,%0\n"					\
	"2:\n"								\
	".section __ex_table,\"a\"\n"					\
	"	.long 1b - .\n"						\
	"	lda %0, 2b-1b(%1)\n"					\
	".previous"							\
		: "=&r"(__gu_val), "=r"(__gu_err)			\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err = 0;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	  case 1: __put_user_8(x,ptr); break;		\
	  case 2: __put_user_16(x,ptr); break;		\
	  case 4: __put_user_32(x,ptr); break;		\
	  case 8: __put_user_64(x,ptr); break;		\
	  default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size,segment)				\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (__access_ok((unsigned long)__pu_addr,size,segment)) {	\
		__pu_err = 0;						\
		switch (size) {						\
		  case 1: __put_user_8(x,__pu_addr); break;		\
		  case 2: __put_user_16(x,__pu_addr); break;		\
		  case 4: __put_user_32(x,__pu_addr); break;		\
		  case 8: __put_user_64(x,__pu_addr); break;		\
		  default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x,addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x,addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.long 3b - .\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.long 4b - .\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif

/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized by the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif

extern void __copy_user(void);

extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}

extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
	if (__access_ok((unsigned long)validate, len, get_fs()))
		len = __copy_tofrom_user_nocheck(to, from, len);
	return len;
}

#define __copy_to_user(to,from,n)					\
({									\
	__chk_user_ptr(to);						\
	__copy_tofrom_user_nocheck((__force void *)(to),(from),(n));	\
})
#define __copy_from_user(to,from,n)					\
({									\
	__chk_user_ptr(from);						\
	__copy_tofrom_user_nocheck((to),(__force void *)(from),(n));	\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	return __copy_tofrom_user((__force void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	return __copy_tofrom_user(to, (__force void *)from, n, from);
}

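/*
 * Example (a sketch; a hypothetical read() method, where `dev_buf'
 * is a driver-private kernel buffer):
 *
 *	if (copy_to_user(buf, dev_buf, count))
 *		return -EFAULT;
 *
 * Note the return value is the number of bytes *not* copied; zero
 * means complete success.
 */
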
extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}

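/*
 * Example (a sketch; `ubuf' and `size' are hypothetical): zero a
 * user buffer, treating any unzeroed remainder as a fault:
 *
 *	if (clear_user(ubuf, size))
 *		return -EFAULT;
 */
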
#undef __module_address
#undef __module_call

/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);

extern inline long
strncpy_from_user(char *to, const char __user *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((unsigned long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char __user *);

extern inline long strlen_user(const char __user *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}

/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char __user *, long);

extern inline long strnlen_user(const char __user *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}

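/*
 * Example (a sketch; `uname' is a hypothetical user string):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return -EFAULT;		(fault while copying)
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(no terminator seen)
 */
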
/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct exception_fixup_bits {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};

/* Returns the new pc */
#define fixup_exception(map_reg, fixup, pc)			\
({								\
	if ((fixup)->fixup.bits.valreg != 31)			\
		map_reg((fixup)->fixup.bits.valreg) = 0;	\
	if ((fixup)->fixup.bits.errreg != 31)			\
		map_reg((fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (fixup)->fixup.bits.nextinsn;			\
})

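/*
 * Decoding example (illustrative): for the fixup emitted by
 * __get_user_32 above, "lda %0, 2b-1b(%1)" assembles in MEM format
 * with valreg = the register holding __gu_val, errreg = the register
 * holding __gu_err, and nextinsn = the byte offset from the faulting
 * ldl at 1b to the continuation point 2b.  On a fault the handler
 * zeroes __gu_val, writes -EFAULT into __gu_err, and resumes at
 * pc + nextinsn.
 */
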
#endif /* __ALPHA_UACCESS_H */