/*
 *  linux/include/asm-arm/uaccess.h
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/arch/memory.h>
#include <asm/domain.h>
#include <asm/system.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};
extern int fixup_exception(struct pt_regs *regs);
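/*
 * Illustrative sketch (not part of the original header): each __ex_table
 * entry simply pairs the address of an instruction that may fault with
 * the address of its fixup code, e.g.
 *
 *	.section __ex_table,"a"
 *	.long	1b, 3b		@ if "1:" faults, resume at "3:"
 *	.previous
 *
 * fixup_exception() searches this table from the data abort handler and,
 * on a match, rewrites the saved program counter to the fixup address.
 */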
/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define USER_DS		TASK_SIZE

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
static inline void set_fs (mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
#define segment_eq(a,b)	((a) == (b))
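/*
 * Usage sketch (assumption, not from the original header): kernel code
 * that wants to pass a kernel pointer through a user-access path can
 * temporarily widen the address limit around the call:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... user-access routine called on a kernel address ...
 *	set_fs(old_fs);
 *
 * Forgetting to restore old_fs leaves access_ok() passing for any
 * address in the current task.
 */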
#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
	unsigned long flag, sum; \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (sum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
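/*
 * Rough C equivalent of the 33-bit check above (an explanatory sketch,
 * not part of the original header).  An addr_limit of 0 (KERNEL_DS)
 * effectively means 0x100000000, so the comparison is done in 64 bits
 * and a wrap-around of addr + size is still caught:
 *
 *	u64 limit = addr_limit ? addr_limit : 0x100000000ULL;
 *	int bad = ((u64)addr + (u64)size) > limit;
 *
 * __range_ok() evaluates to 0 for a valid range, which is why
 * access_ok() compares its result with 0.
 */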
static inline int verify_area(int type, const void __user *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
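/*
 * Typical call site (illustrative sketch only): a driver validates the
 * whole user buffer once, then uses the unchecked __copy/__get/__put
 * variants, e.g.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *
 * verify_area() is the older interface wrapping the same check.
 */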
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
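/*
 * Illustrative usage of the three flavours described above (a sketch,
 * not part of the original header):
 *
 *	int val, err;
 *
 *	if (get_user(val, uptr))		checks the address itself
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __get_user(val, uptr);		address already verified
 *
 *	err = 0;
 *	__get_user_error(val, uptr, err);	leaves -EFAULT in err on fault
 *	if (err)
 *		return err;
 */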
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_8(void *);
extern int __get_user_bad(void);
#define __get_user_x(__r1,__p,__e,__s,__i...)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r1")			\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r1)				\
		: "0" (__p)						\
		: __i, "cc")
#define get_user(x,p)							\
	({								\
		const register typeof(*(p)) *__p asm("r0") = (p);	\
		register typeof(*(p)) __r1 asm("r1");			\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:	__get_user_x(__r1, __p, __e, 1, "lr");		\
			break;						\
		case 2:	__get_user_x(__r1, __p, __e, 2, "r2", "lr");	\
			break;						\
		case 4:	__get_user_x(__r1, __p, __e, 4, "lr");		\
			break;						\
		case 8:	__get_user_x(__r1, __p, __e, 8, "lr");		\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		x = __r1;						\
		__e;							\
	})
#define __get_user(x,ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x),(ptr),__gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x,ptr,err)					\
({									\
	__get_user_err((x),(ptr),err);					\
	(void) 0;							\
})
#define __get_user_err(x,ptr,err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
	case 4:	__get_user_asm_word(__gu_val,__gu_addr,err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm_byte(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	ldrbt	%1,[%2],#0\n"				\
	"2:\n"							\
	"	.section .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.previous\n"					\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.previous"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x,__gu_addr,err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#define __get_user_asm_word(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	ldrt	%1,[%2],#0\n"				\
	"2:\n"							\
	"	.section .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.previous\n"					\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.previous"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);
#define __put_user_x(__r1,__p,__e,__s)					\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%2", "r1")			\
		"bl	__put_user_" #__s				\
		: "=&r" (__e)						\
		: "0" (__p), "r" (__r1)					\
		: "ip", "lr", "cc")
#define put_user(x,p)							\
	({								\
		const register typeof(*(p)) __r1 asm("r1") = (x);	\
		const register typeof(*(p)) *__p asm("r0") = (p);	\
		register int __e asm("r0");				\
		switch (sizeof(*(__p))) {				\
		case 1:	__put_user_x(__r1, __p, __e, 1);		\
			break;						\
		case 2:	__put_user_x(__r1, __p, __e, 2);		\
			break;						\
		case 4:	__put_user_x(__r1, __p, __e, 4);		\
			break;						\
		case 8:	__put_user_x(__r1, __p, __e, 8);		\
			break;						\
		default: __e = __put_user_bad(); break;			\
		}							\
		__e;							\
	})
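/*
 * Illustrative counterpart for stores (a sketch, not from the original
 * header): put_user() performs the address check itself (in the
 * out-of-line __put_user_<size> helpers), __put_user() assumes the
 * caller already called access_ok():
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * The register variables above pin the address to r0 and the value to
 * r1, which is the calling convention the helpers expect.
 */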
#define __put_user(x,ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x),(ptr),__pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x,ptr,err)					\
({									\
	__put_user_err((x),(ptr),err);					\
	(void) 0;							\
})

#define __put_user_err(x,ptr,err)					\
do {									\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
	case 4: __put_user_asm_word(__pu_val,__pu_addr,err);	break;	\
	case 8:	__put_user_asm_dword(__pu_val,__pu_addr,err);	break;	\
	default: __put_user_bad();					\
	}								\
} while (0)
#define __put_user_asm_byte(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	strbt	%1,[%2],#0\n"				\
	"2:\n"							\
	"	.section .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.previous\n"					\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.previous"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x,__pu_addr,err)			\
({								\
	unsigned long __temp = (unsigned long)(x);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif
#define __put_user_asm_word(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	strt	%1,[%2],#0\n"				\
	"2:\n"							\
	"	.section .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.previous\n"					\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.previous"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	strt	" __reg_oper1 ", [%1], #4\n"		\
	"2:	strt	" __reg_oper0 ", [%1], #0\n"		\
	"3:\n"							\
	"	.section .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.previous\n"					\
	"	.section __ex_table,\"a\"\n"			\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.previous"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")
extern unsigned long __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern unsigned long __arch_strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __arch_strnlen_user(const char __user *s, long n);
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __arch_copy_from_user(to, from, n);
	else /* security hole - plug it */
		memzero(to, n);
	return n;
}

static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __arch_copy_from_user(to, from, n);
}
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __arch_copy_to_user(to, from, n);
	return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __arch_copy_to_user(to, from, n);
}
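/*
 * Usage sketch (assumption, not part of the original header): both
 * copy_from_user() and copy_to_user() return the number of bytes that
 * could NOT be copied, so 0 means success:
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * The memzero() in copy_from_user() above zeroes the destination when
 * the user range is invalid, so buggy callers that ignore the return
 * value do not end up processing stale kernel memory.
 */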
static inline unsigned long clear_user (void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __arch_clear_user(to, n);
	return n;
}

static inline unsigned long __clear_user (void __user *to, unsigned long n)
{
	return __arch_clear_user(to, n);
}
static inline long strncpy_from_user (char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		res = __arch_strncpy_from_user(dst, src, count);
	return res;
}

static inline long __strncpy_from_user (char *dst, const char __user *src, long count)
{
	return __arch_strncpy_from_user(dst, src, count);
}
#define strlen_user(s)	strnlen_user(s, ~0UL >> 1)
static inline long strnlen_user(const char __user *s, long n)
{
	unsigned long res = 0;

	if (__addr_ok(s))
		res = __arch_strnlen_user(s, n);

	return res;
}
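/*
 * Usage sketch for the string helpers (assumption, not from the
 * original header): strncpy_from_user() returns the number of
 * characters copied (excluding the trailing NUL) or -EFAULT, while
 * strnlen_user() returns the string length including the terminating
 * NUL, or 0 if the address is out of range:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return -EFAULT;
 */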
#endif /* _ASMARM_UACCESS_H */