#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
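
/*
 * Typical use of the segment override (an illustrative sketch, not part
 * of this header): code that wants to apply a user-pointer-based helper
 * to a kernel buffer temporarily raises the limit to KERNEL_DS and must
 * always restore the old value, even on error paths:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = helper_that_does_uaccess((void __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * helper_that_does_uaccess() is a hypothetical stand-in for any routine
 * that performs get_user()/copy_from_user() style accesses internally.
 */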

#ifdef __CHECKER__
#define CHECK_UPTR(ptr) do {				\
	__typeof__(*(ptr)) *__dummy_check_uptr =	\
		(void __user *)&__dummy_check_uptr;	\
	(void)__dummy_check_uptr;			\
} while (0)
#else
#define CHECK_UPTR(ptr)
#endif

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({					\
	unsigned long flag,sum;						\
	CHECK_UPTR(addr);						\
	asm("# range_ok\n\t"						\
	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"		\
	    : "=&r" (flag), "=r" (sum)					\
	    : "1" (addr), "g" ((long)(size)),				\
	      "g" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)

extern inline int verify_area(int type, const void __user *addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
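
/*
 * Example (a minimal sketch, assuming a hypothetical "struct my_args"
 * ioctl argument): validate the whole user range once with access_ok(),
 * then use the unchecked __get_user() variants defined below for the
 * individual field accesses:
 *
 *	struct my_args __user *uarg = (struct my_args __user *)arg;
 *	u32 a, b;
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 */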

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
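
/*
 * Conceptually, the page fault handler resolves a fault on a marked
 * instruction roughly like this (a sketch only; the real lookup lives
 * in the generic fault path, not in this header). Execution then
 * resumes at the fixup stub:
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->rip);
 *	if (fix)
 *		regs->rip = fix->fixup;
 */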

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);
extern void __get_user_8(void);

#define __get_user_x(size,ret,x,ptr)			\
	__asm__ __volatile__("call __get_user_" #size	\
		: "=a" (ret), "=d" (x)			\
		: "c" (ptr)				\
		: "r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)							\
({	unsigned long __val_gu;						\
	int __ret_gu;							\
	CHECK_UPTR(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_bad(); break;				\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
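
/*
 * Usage sketch: get_user() evaluates to 0 on success and -EFAULT on a
 * faulting address, and picks the transfer size from the pointer type:
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */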

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)			\
	__asm__ __volatile__("call __put_user_" #size	\
		: "=a" (ret)				\
		: "c" (ptr), "d" (x)			\
		: "r8")

#define put_user(x,ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr)						\
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
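
/*
 * The checked and unchecked forms pair up like this: put_user() performs
 * its own access_ok(), while __put_user() relies on a range check done
 * once up front (illustrative sketch; uarr, vals and n are hypothetical):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(vals[i], uarr + i))
 *			return -EFAULT;
 */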

#define __put_user_nocheck(x,ptr,size)			\
({							\
	int __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	int __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (likely(access_ok(VERIFY_WRITE,__pu_addr,size)))	\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
	case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
	default: __put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	__asm__ __volatile__(						\
		"1:\tmov"itype" %"rtype"1,%2\n"				\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:\tmov %3,%0\n"					\
		"\tjmp 2b\n"						\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"\t.align 8\n"						\
		"\t.quad 1b,3b\n"					\
		".previous"						\
		: "=r" (err)						\
		: ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
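
/*
 * For reference, __put_user_asm(x, ptr, err, "l", "k", "ir", -EFAULT)
 * expands to roughly the following (a hand-written approximation; the
 * actual register choices are up to the compiler):
 *
 *	1:	movl %eax,(%rbx)	# the store that may fault
 *	2:				# normal continuation
 *	.section .fixup,"ax"
 *	3:	mov $-14,%ecx		# err = -EFAULT
 *		jmp 2b
 *	.previous
 *	.section __ex_table,"a"
 *		.align 8
 *		.quad 1b,3b		# fault at 1b resumes at 3b
 *	.previous
 */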

#define __get_user_nocheck(x,ptr,size)				\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	__asm__ __volatile__(						\
		"1:\tmov"itype" %2,%"rtype"1\n"				\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:\tmov %3,%0\n"					\
		"\txor"itype" %"rtype"1,%"rtype"1\n"			\
		"\tjmp 2b\n"						\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"\t.align 8\n"						\
		"\t.quad 1b,3b\n"					\
		".previous"						\
		: "=r" (err), ltype (x)					\
		: "m" (__m(addr)), "i" (errno), "0" (err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);

extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
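
/*
 * Typical pattern (a sketch, assuming a hypothetical write() handler):
 * the copy_*_user() routines return the number of bytes that could NOT
 * be copied, so any nonzero result means a fault:
 *
 *	char kbuf[64];
 *
 *	if (count > sizeof(kbuf))
 *		count = sizeof(kbuf);
 *	if (copy_from_user(kbuf, ubuf, count))
 *		return -EFAULT;
 */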

static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst,(void *)src,size);
	switch (size) {
	case 1: __get_user_asm(*(u8 *)dst,(u8 *)src,ret,"b","b","=q",1);
		return ret;
	case 2: __get_user_asm(*(u16 *)dst,(u16 *)src,ret,"w","w","=r",2);
		return ret;
	case 4: __get_user_asm(*(u32 *)dst,(u32 *)src,ret,"l","k","=r",4);
		return ret;
	case 8: __get_user_asm(*(u64 *)dst,(u64 *)src,ret,"q","","=r",8);
		return ret;
	case 10:
		/* fault value = bytes not yet copied: 10 here, 2 below */
		__get_user_asm(*(u64 *)dst,(u64 *)src,ret,"q","","=r",10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8+(char *)dst),(u16 *)(8+(char *)src),ret,"w","w","=r",2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst,(u64 *)src,ret,"q","","=r",16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8+(char *)dst),(u64 *)(8+(char *)src),ret,"q","","=r",8);
		return ret;
	default:
		return copy_user_generic(dst,(void *)src,size);
	}
}
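
/*
 * With a compile-time-constant size the switch above collapses to one
 * or two inlined mov instructions, e.g. (sketch; the caller must have
 * issued access_ok() already, as with all __ variants):
 *
 *	struct timeval tv;	(16 bytes on x86-64: the case 16 path)
 *
 *	if (__copy_from_user(&tv, utv, sizeof(tv)))
 *		return -EFAULT;
 */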

static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((void *)dst,src,size);
	switch (size) {
	case 1: __put_user_asm(*(u8 *)src,(u8 *)dst,ret,"b","b","iq",1);
		return ret;
	case 2: __put_user_asm(*(u16 *)src,(u16 *)dst,ret,"w","w","ir",2);
		return ret;
	case 4: __put_user_asm(*(u32 *)src,(u32 *)dst,ret,"l","k","ir",4);
		return ret;
	case 8: __put_user_asm(*(u64 *)src,(u64 *)dst,ret,"q","","ir",8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src,(u64 *)dst,ret,"q","","ir",10);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u16 *)src)[4],(u16 *)dst + 4,ret,"w","w","ir",2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src,(u64 *)dst,ret,"q","","ir",16);
		if (unlikely(ret))
			return ret;
		asm("" : : : "memory");
		__put_user_asm(((u64 *)src)[1],(u64 *)dst + 1,ret,"q","","ir",8);
		return ret;
	default:
		return copy_user_generic((void *)dst,src,size);
	}
}

static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((void *)dst,(void *)src,size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp,(u8 *)src,ret,"b","b","=q",1);
		if (likely(!ret))
			__put_user_asm(tmp,(u8 *)dst,ret,"b","b","iq",1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp,(u16 *)src,ret,"w","w","=r",2);
		if (likely(!ret))
			__put_user_asm(tmp,(u16 *)dst,ret,"w","w","ir",2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp,(u32 *)src,ret,"l","k","=r",4);
		if (likely(!ret))
			__put_user_asm(tmp,(u32 *)dst,ret,"l","k","ir",4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp,(u64 *)src,ret,"q","","=r",8);
		if (likely(!ret))
			__put_user_asm(tmp,(u64 *)dst,ret,"q","","ir",8);
		return ret;
	}
	default:
		return copy_user_generic((void *)dst,(void *)src,size);
	}
}

long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *str, long n);
long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
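
/*
 * Example (illustrative): strncpy_from_user() returns the length of the
 * string copied (excluding the trailing NUL) on success, count if the
 * buffer filled up before a NUL was found, or -EFAULT on a fault; it
 * does not NUL-terminate in the truncation case, so reserve a byte:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */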

#endif /* __X86_64_UACCESS_H */