#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

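/*
 * Minimal sketch of the usual override idiom (illustrative only, not
 * part of this header): temporarily widen the limit so kernel buffers
 * pass the user-pointer checks, and always restore the old segment:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... call code that does get_user()/copy_from_user() ...
 *	set_fs(old_fs);
 */
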
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({ \
	unsigned long flag,sum; \
	__chk_user_ptr(addr); \
	asm("# range_ok\n\r" \
		"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
	flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)

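/*
 * Worked example of the carry trick (added for illustration): the asm
 * computes sum = addr + size; a 64-bit overflow sets the carry, and
 * "sbbq %0,%0" turns that carry into an all-ones flag.  The following
 * "cmpq %1,%4 ; sbbq $0,%0" likewise flags sum > addr_limit.  So with
 * addr = 0xFFFFFFFFFFFFFFF0 and size = 0x20 the addition wraps,
 * __range_not_ok() is nonzero, and access_ok() correctly fails even
 * though both addr and addr_limit look plausible on their own.
 */
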
extern inline int verify_area(int type, const void __user * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

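/*
 * Conceptual sketch of how a fault gets resolved (illustrative, not
 * code from this header; "regs" is the hypothetical trap frame): the
 * page-fault handler looks the faulting instruction up via the generic
 * search_exception_tables() helper and, on a hit, resumes at the fixup
 * stub instead of oopsing:
 *
 *	const struct exception_table_entry *e =
 *		search_exception_tables(regs->rip);
 *	if (e)
 *		regs->rip = e->fixup;
 *
 * The entries themselves are emitted by the __ex_table sections in the
 * access macros below.
 */
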
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);
extern void __get_user_8(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"c" (ptr) \
		:"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({	unsigned long __val_gu; \
	int __ret_gu; \
	__chk_user_ptr(ptr); \
	switch(sizeof (*(ptr))) { \
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
	default: __get_user_bad(); break; \
	} \
	(x) = (__typeof__(*(ptr)))__val_gu; \
	__ret_gu; \
})

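/*
 * Hedged usage sketch (hypothetical ioctl-style handler; "arg" and
 * "uarg" are not from this header).  get_user() yields two results
 * without passing pointers around: the error code as its value and
 * the fetched datum through its first argument.
 *
 *	int __user *uarg = (int __user *)arg;
 *	int val;
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	if (put_user(val * 2, uarg))
 *		return -EFAULT;
 */
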
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __put_user_" #size \
		:"=a" (ret) \
		:"c" (ptr),"d" (x) \
		:"r8")

#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x,ptr,size) \
({ \
	int __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	int __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (likely(access_ok(VERIFY_WRITE,__pu_addr,size))) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
	case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
	default: __put_user_bad(); \
	} \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno) \
	__asm__ __volatile__( \
		"1:	mov"itype" %"rtype"1,%2\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	mov %3,%0\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 8\n" \
		"	.quad 1b,3b\n" \
		".previous" \
		: "=r"(err) \
		: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))

#define __get_user_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
	unsigned long __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
	case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
	default: (x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno) \
	__asm__ __volatile__( \
		"1:	mov"itype" %2,%"rtype"1\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	mov %3,%0\n" \
		"	xor"itype" %"rtype"1,%"rtype"1\n" \
		"	jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"	.align 8\n" \
		"	.quad 1b,3b\n" \
		".previous" \
		: "=r"(err), ltype (x) \
		: "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
extern unsigned long copy_user_generic(void *to, const void *from, unsigned len);

extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);

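/*
 * Note on the return convention, with a hypothetical caller ("ubuf"
 * and "kbuf" are illustrative): these routines return the number of
 * bytes that could NOT be copied (0 on success), not an errno, so a
 * typical caller converts the result itself:
 *
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */
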
static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst,(__force void *)src,size);
	switch (size) {
	case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
		return ret;
	case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
		return ret;
	case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
		return ret;
	case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
		return ret;
	case 10:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
		return ret;
	case 16:
		__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
		if (unlikely(ret)) return ret;
		__get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
		return ret;
	default:
		return copy_user_generic(dst,(__force void *)src,size);
	}
}

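/*
 * Usage sketch for the no-check variant ("uptr" and struct req are
 * hypothetical): the caller must have validated the range itself,
 * since the double-underscore functions skip access_ok():
 *
 *	struct req r;
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(r)))
 *		return -EFAULT;
 *	if (__copy_from_user(&r, uptr, sizeof(r)))
 *		return -EFAULT;
 *
 * With a constant sizeof(r) of 1, 2, 4, 8, 10 or 16 this compiles to
 * the inline mov sequences above; any other size falls back to
 * copy_user_generic().
 */
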
static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,src,size);
	switch (size) {
	case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	case 10:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	case 16:
		__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
		if (unlikely(ret)) return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst,src,size);
	}
}

static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
		if (likely(!ret))
			__put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
		if (likely(!ret))
			__put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
		if (likely(!ret))
			__put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
		if (likely(!ret))
			__put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,(__force void *)src,size);
	}
}

long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *str, long n);
long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);

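/*
 * Illustrative use of the string helpers ("uname" and the buffer are
 * hypothetical): strncpy_from_user() returns the copied length on
 * success and -EFAULT on an unreadable source; if it returns count,
 * the user string was truncated and the buffer may lack a NUL:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */
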
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#endif /* __X86_64_UACCESS_H */