X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-i386%2Fuaccess.h;h=371457b1ceb6de2b47e8fcf7d125f8e57488cfb5;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=54d905ebc63dd9738e00f592ee0787b371ef1e62;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 54d905ebc..371457b1c 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -4,6 +4,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
@@ -58,7 +59,7 @@ extern struct movsl_mask {
 	__chk_user_ptr(addr); \
 	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
 		:"=&r" (flag), "=r" (sum) \
-		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
+		:"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
 	flag; })
 
 /**
@@ -390,12 +391,6 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero(void *to,
-				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache(void *to,
-				const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
-				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -462,41 +457,10 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  *
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
  */
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-	/* Avoid zeroing the tail if the copy fails..
-	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
-	 * but as the zeroing behaviour is only significant when n is not
-	 * constant, that shouldn't be a problem.
-	 */
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			return ret;
-		case 2:
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			return ret;
-		case 4:
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			return ret;
-		}
-	}
-	return __copy_from_user_ll_nozero(to, from, n);
-}
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	might_sleep();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -515,36 +479,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-#define ARCH_HAS_NOCACHE_UACCESS
-
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
-{
-	might_sleep();
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__get_user_size(*(u8 *)to, from, 1, ret, 1);
-			return ret;
-		case 2:
-			__get_user_size(*(u16 *)to, from, 2, ret, 2);
-			return ret;
-		case 4:
-			__get_user_size(*(u32 *)to, from, 4, ret, 4);
-			return ret;
-		}
-	}
-	return __copy_from_user_ll_nocache(to, from, n);
-}
-
 static __always_inline unsigned long
-__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
+__copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_from_user_ll_nocache_nozero(to, from, n);
+	might_sleep();
+	return __copy_from_user_inatomic(to, from, n);
 }
-
 unsigned long __must_check copy_to_user(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
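
Note on the __range_ok() hunk above: the "rm" -> "g" constraint change only affects which operand forms GCC may choose for the segment-limit input; the computation is unchanged. For reference, a plain-C sketch (illustrative only, not kernel code) of what the addl/sbbl/cmpl/sbbl sequence computes — the flag ends up non-zero when addr + size wraps around zero or lands beyond addr_limit.seg:

/* Plain-C rendition of __range_ok()'s inline asm (illustrative only).
 * Returns non-zero when the range is bad, mirroring 'flag' above. */
static int range_bad(unsigned long addr, unsigned long size,
		     unsigned long limit)
{
	unsigned long sum = addr + size;	/* addl: sets carry on wrap */

	if (sum < addr)
		return 1;	/* sbbl %0,%0: address wrapped */
	if (sum > limit)
		return 1;	/* cmpl/sbbl $0: past addr_limit.seg */
	return 0;
}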
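The 1-, 2- and 4-byte special-casing that both sides of the diff keep relies on __builtin_constant_p() folding at compile time. Below is a minimal userspace analogue of the pattern (copy_small() and generic_copy() are made-up names): the real kernel code instead expands each case through __get_user_size(), so a faulting access is caught via the exception tables, and falls back to __copy_from_user_ll() for non-constant sizes.

#include <stdint.h>
#include <string.h>

/* Stand-in for the generic __copy_from_user_ll() slow path. */
static inline void generic_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
}

/* A literal n of 1, 2 or 4 compiles down to a single fixed-size move;
 * any other size takes the generic path. */
static inline void copy_small(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(uint8_t *)to = *(const uint8_t *)from;
			return;
		case 2:
			*(uint16_t *)to = *(const uint16_t *)from;
			return;
		case 4:
			*(uint32_t *)to = *(const uint32_t *)from;
			return;
		}
	}
	generic_copy(to, from, n);
}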
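After this change __copy_from_user() is simply might_sleep() plus __copy_from_user_inatomic(), and with the _nozero variants gone both entry points zero the uncopied tail on a fault, as the kernel-doc comment above describes. A hypothetical caller sketch (kernel context assumed; read_u32_from_user() is an invented example, and the return convention — number of bytes left uncopied, non-zero meaning failure — is the documented one):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical example: fetch one u32 from userspace. */
static int read_u32_from_user(const void __user *uptr, u32 *out)
{
	u32 val;

	if (copy_from_user(&val, uptr, sizeof(val)))
		return -EFAULT;	/* some bytes were left uncopied */
	*out = val;
	return 0;
}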