X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-i386%2Fuaccess.h;h=eef5133b9ce2a03b73b5f4c73f0e771a12b52aa8;hb=a2f44b27303a5353859d77a3e96a1d3f33f56ab7;hp=3f1337c342087561483c5d604396a227ce4664ce;hpb=134734d875a0a48d994ef20b9905209b4b8b6f75;p=linux-2.6.git

diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 3f1337c34..eef5133b9 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -4,7 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
@@ -59,7 +58,7 @@ extern struct movsl_mask {
 	__chk_user_ptr(addr); \
 	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
 		:"=&r" (flag), "=r" (sum) \
-		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
 	flag; })
 
 /**
@@ -197,13 +196,15 @@ extern void __put_user_8(void);
 
 #define put_user(x,ptr)						\
 ({	int __ret_pu;						\
+	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
+	__pu_val = x;						\
 	switch(sizeof(*(ptr))) {				\
-	case 1: __put_user_1(x, ptr); break;			\
-	case 2: __put_user_2(x, ptr); break;			\
-	case 4: __put_user_4(x, ptr); break;			\
-	case 8: __put_user_8(x, ptr); break;			\
-	default:__put_user_X(x, ptr); break;			\
+	case 1: __put_user_1(__pu_val, ptr); break;		\
+	case 2: __put_user_2(__pu_val, ptr); break;		\
+	case 4: __put_user_4(__pu_val, ptr); break;		\
+	case 8: __put_user_8(__pu_val, ptr); break;		\
+	default:__put_user_X(__pu_val, ptr); break;		\
 	}							\
 	__ret_pu;						\
 })
@@ -389,6 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -397,6 +404,27 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * anything, so this is accurate.
  */
 
+static __always_inline unsigned long __must_check
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
+			return ret;
+		case 2:
+			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
+			return ret;
+		case 4:
+			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_to_user_ll(to, from, n);
+}
+
 /**
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to: Destination address, in user space.
@@ -412,31 +440,36 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * On success, this will be zero.
  */
 static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	might_sleep();
+	return __copy_to_user_inatomic(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	/* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, + * but as the zeroing behaviour is only significant when n is not + * constant, that shouldn't be a problem. + */ if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: - __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); + __get_user_size(*(u8 *)to, from, 1, ret, 1); return ret; case 2: - __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); + __get_user_size(*(u16 *)to, from, 2, ret, 2); return ret; case 4: - __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); + __get_user_size(*(u32 *)to, from, 4, ret, 4); return ret; } } - return __copy_to_user_ll(to, from, n); -} - -static __always_inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_sleep(); - return __copy_to_user_inatomic(to, from, n); + return __copy_from_user_ll_nozero(to, from, n); } /** @@ -455,10 +488,16 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) * * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. + * + * An alternate version - __copy_from_user_inatomic() - may be called from + * atomic context and will fail rather than sleep. In this case the + * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h + * for explanation of why this is needed. */ static __always_inline unsigned long -__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) +__copy_from_user(void *to, const void __user *from, unsigned long n) { + might_sleep(); if (__builtin_constant_p(n)) { unsigned long ret; @@ -477,12 +516,36 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) return __copy_from_user_ll(to, from, n); } +#define ARCH_HAS_NOCACHE_UACCESS + +static __always_inline unsigned long __copy_from_user_nocache(void *to, + const void __user *from, unsigned long n) +{ + might_sleep(); + if (__builtin_constant_p(n)) { + unsigned long ret; + + switch (n) { + case 1: + __get_user_size(*(u8 *)to, from, 1, ret, 1); + return ret; + case 2: + __get_user_size(*(u16 *)to, from, 2, ret, 2); + return ret; + case 4: + __get_user_size(*(u32 *)to, from, 4, ret, 4); + return ret; + } + } + return __copy_from_user_ll_nocache(to, from, n); +} + static __always_inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) +__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { - might_sleep(); - return __copy_from_user_inatomic(to, from, n); + return __copy_from_user_ll_nocache_nozero(to, from, n); } + unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n); unsigned long __must_check copy_from_user(void *to,
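
Note added for illustration, not part of the patch above: the hunks in this diff split the i386 user-copy helpers into sleeping and atomic variants and make __copy_from_user_inatomic() stop zero-padding the uncopied tail. The sketch below shows how a caller might use that split, loosely modelled on the pagecache write path that the new kerneldoc comment points at (fs/filemap.h). The function name and the page/offset/buf/bytes parameters are hypothetical and exist only for this example.

#include <linux/highmem.h>
#include <asm/uaccess.h>

/* Hypothetical helper: copy user data into a kernel page, trying the
 * non-sleeping copy first and falling back to the sleeping one. */
static size_t copy_from_user_atomic_then_retry(struct page *page,
		unsigned long offset, const char __user *buf, size_t bytes)
{
	char *kaddr;
	unsigned long left;

	/* Fast path: under kmap_atomic() the copy must not sleep, so use
	 * __copy_from_user_inatomic(); per this patch it fails without
	 * zeroing the bytes it could not copy. */
	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left) {
		/* Slow path: a sleeping kmap() plus __copy_from_user(),
		 * which may fault the user page in and zero-pads whatever
		 * still cannot be copied. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;
}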