X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fuaccess.h;h=d973e8b3466ca805afbdf52a846211434f2e8269;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=f147bac9db50b9691fb6e990b379e1e98c84c9ef;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/include/asm-parisc/uaccess.h b/include/asm-parisc/uaccess.h index f147bac9d..d973e8b34 100644 --- a/include/asm-parisc/uaccess.h +++ b/include/asm-parisc/uaccess.h @@ -8,6 +8,7 @@ #include #include #include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -23,7 +24,7 @@ /* * Note that since kernel addresses are in a separate address space on - * parisc, we don't need to do anything for access_ok() or verify_area(). + * parisc, we don't need to do anything for access_ok(). * We just let the page fault handler do the right thing. This also means * that put_user is the same as __put_user, etc. */ @@ -33,8 +34,11 @@ extern int __get_user_bad(void); extern int __put_kernel_bad(void); extern int __put_user_bad(void); -#define access_ok(type,addr,size) (1) -#define verify_area(type,addr,size) (0) +static inline long access_ok(int type, const void __user * addr, + unsigned long size) +{ + return 1; +} #define put_user __put_user #define get_user __get_user @@ -54,16 +58,22 @@ extern int __put_user_bad(void); /* * The exception table contains two values: the first is an address * for an instruction that is allowed to fault, and the second is - * the number of bytes to skip if a fault occurs. We also support in - * two bit flags: 0x2 tells the exception handler to clear register - * r9 and 0x1 tells the exception handler to put -EFAULT in r8. - * This allows us to handle the simple cases for put_user and - * get_user without having to have .fixup sections. + * the address to the fixup routine. */ struct exception_table_entry { unsigned long insn; /* address of insn that is allowed to fault. 
*/ - long skip; /* pcoq skip | r9 clear flag | r8 -EFAULT flag */ + long fixup; /* fixup routine */ +}; + +/* + * The page fault handler stores, in a per-cpu area, the following information + * if a fixup routine is available. + */ +struct exception_data { + unsigned long fault_ip; + unsigned long fault_space; + unsigned long fault_addr; }; #define __get_user(x,ptr) \ @@ -97,64 +107,61 @@ struct exception_table_entry { #ifdef __LP64__ #define __get_kernel_asm(ldx,ptr) \ __asm__("\n1:\t" ldx "\t0(%2),%0\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.dword\t1b\n" \ - "\t.dword\t(2b-1b)+3\n" \ - "\t.previous" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.dword\t1b,fixup_get_user_skip_1\n" \ + "\t.previous" \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); + : "r"(ptr), "1"(__gu_err) \ + : "r1"); #define __get_user_asm(ldx,ptr) \ __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.dword\t1b\n" \ - "\t.dword\t(2b-1b)+3\n" \ - "\t.previous" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.dword\t1b,fixup_get_user_skip_1\n" \ + "\t.previous" \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); + : "r"(ptr), "1"(__gu_err) \ + : "r1"); #else #define __get_kernel_asm(ldx,ptr) \ __asm__("\n1:\t" ldx "\t0(%2),%0\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.word\t1b\n" \ - "\t.word\t(2b-1b)+3\n" \ - "\t.previous" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.word\t1b,fixup_get_user_skip_1\n" \ + "\t.previous" \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); + : "r"(ptr), "1"(__gu_err) \ + : "r1"); #define __get_user_asm(ldx,ptr) \ __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.word\t1b\n" \ - "\t.word\t(2b-1b)+3\n" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.word\t1b,fixup_get_user_skip_1\n" \ "\t.previous" \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err)); + : "r"(ptr), "1"(__gu_err) \ + : "r1"); 
#endif /* !__LP64__ */ #define __put_user(x,ptr) \ ({ \ - register long __pu_err __asm__ ("r8") = 0; \ + register long __pu_err __asm__ ("r8") = 0; \ + __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ \ if (segment_eq(get_fs(),KERNEL_DS)) { \ switch (sizeof(*(ptr))) { \ - case 1: __put_kernel_asm("stb",x,ptr); break; \ - case 2: __put_kernel_asm("sth",x,ptr); break; \ - case 4: __put_kernel_asm("stw",x,ptr); break; \ - case 8: STD_KERNEL(x,ptr); break; \ + case 1: __put_kernel_asm("stb",__x,ptr); break; \ + case 2: __put_kernel_asm("sth",__x,ptr); break; \ + case 4: __put_kernel_asm("stw",__x,ptr); break; \ + case 8: STD_KERNEL(__x,ptr); break; \ default: __put_kernel_bad(); break; \ } \ } \ else { \ switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm("stb",x,ptr); break; \ - case 2: __put_user_asm("sth",x,ptr); break; \ - case 4: __put_user_asm("stw",x,ptr); break; \ - case 8: STD_USER(x,ptr); break; \ + case 1: __put_user_asm("stb",__x,ptr); break; \ + case 2: __put_user_asm("sth",__x,ptr); break; \ + case 4: __put_user_asm("stw",__x,ptr); break; \ + case 8: STD_USER(__x,ptr); break; \ default: __put_user_bad(); break; \ } \ } \ @@ -165,89 +172,85 @@ struct exception_table_entry { /* * The "__put_user/kernel_asm()" macros tell gcc they read from memory * instead of writing. This is because they do not write to any memory - * gcc knows about, so there are no aliasing issues. + * gcc knows about, so there are no aliasing issues. These macros must + * also be aware that "fixup_put_user_skip_[12]" are executed in the + * context of the fault, and any registers used there must be listed + * as clobbers. In this case only "r1" is used by the current routines. + * r8/r9 are already listed as err/val. 
*/ #ifdef __LP64__ #define __put_kernel_asm(stx,x,ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%1)\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.dword\t1b\n" \ - "\t.dword\t(2b-1b)+1\n" \ - "\t.previous" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.dword\t1b,fixup_put_user_skip_1\n" \ + "\t.previous" \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err)) + : "r"(ptr), "r"(x), "0"(__pu_err) \ + : "r1") #define __put_user_asm(stx,x,ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.dword\t1b\n" \ - "\t.dword\t(2b-1b)+1\n" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.dword\t1b,fixup_put_user_skip_1\n" \ "\t.previous" \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err)) + : "r"(ptr), "r"(x), "0"(__pu_err) \ + : "r1") #else #define __put_kernel_asm(stx,x,ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%1)\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.word\t1b\n" \ - "\t.word\t(2b-1b)+1\n" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.word\t1b,fixup_put_user_skip_1\n" \ "\t.previous" \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err)) + : "r"(ptr), "r"(x), "0"(__pu_err) \ + : "r1") #define __put_user_asm(stx,x,ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \ - "2:\n" \ - "\t.section __ex_table,\"aw\"\n" \ - "\t.word\t1b\n" \ - "\t.word\t(2b-1b)+1\n" \ + "\t.section __ex_table,\"aw\"\n" \ + "\t.word\t1b,fixup_put_user_skip_1\n" \ "\t.previous" \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err)) - -static inline void __put_kernel_asm64(u64 x, void *ptr) -{ - u32 hi = x>>32; - u32 lo = x&0xffffffff; - __asm__ __volatile__ ( - "\n1:\tstw %1,0(%0)\n" - "\n2:\tstw %2,4(%0)\n" - "3:\n" - "\t.section __ex_table,\"aw\"\n" - "\t.word\t1b\n" - "\t.word\t(3b-1b)+1\n" - "\t.word\t2b\n" - "\t.word\t(3b-2b)+1\n" - "\t.previous" - : : "r"(ptr), "r"(hi), "r"(lo)); - -} - -static inline void __put_user_asm64(u64 x, void *ptr) -{ - u32 hi = 
x>>32;
- u32 lo = x&0xffffffff;
- __asm__ __volatile__ (
- "\n1:\tstw %1,0(%%sr3,%0)\n"
- "\n2:\tstw %2,4(%%sr3,%0)\n"
- "3:\n"
- "\t.section __ex_table,\"aw\"\n"
- "\t.word\t1b\n"
- "\t.word\t(3b-1b)+1\n"
- "\t.word\t2b\n"
- "\t.word\t(3b-2b)+1\n"
- "\t.previous"
- : : "r"(ptr), "r"(hi), "r"(lo));
-
-}
+ : "r"(ptr), "r"(x), "0"(__pu_err) \
+ : "r1")
+
+#define __put_kernel_asm64(__val,ptr) do { \
+ u64 __val64 = (u64)(__val); \
+ u32 hi = (__val64) >> 32; \
+ u32 lo = (__val64) & 0xffffffff; \
+ __asm__ __volatile__ ( \
+ "\n1:\tstw %2,0(%1)\n" \
+ "\n2:\tstw %3,4(%1)\n" \
+ "\t.section __ex_table,\"aw\"\n" \
+ "\t.word\t1b,fixup_put_user_skip_2\n" \
+ "\t.word\t2b,fixup_put_user_skip_1\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r1"); \
+} while (0)
+
+#define __put_user_asm64(__val,ptr) do { \
+ u64 __val64 = (u64)__val; \
+ u32 hi = (__val64) >> 32; \
+ u32 lo = (__val64) & 0xffffffff; \
+ __asm__ __volatile__ ( \
+ "\n1:\tstw %2,0(%%sr3,%1)\n" \
+ "\n2:\tstw %3,4(%%sr3,%1)\n" \
+ "\t.section __ex_table,\"aw\"\n" \
+ "\t.word\t1b,fixup_put_user_skip_2\n" \
+ "\t.word\t2b,fixup_put_user_skip_1\n" \
+ "\t.previous" \
+ : "=r"(__pu_err) \
+ : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r1"); \
+} while (0)

 #endif /* !__LP64__ */

@@ -273,11 +276,13 @@ extern long lstrnlen_user(const char __user *,long);
 #define clear_user lclear_user
 #define __clear_user lclear_user

-#define copy_from_user lcopy_from_user
-#define __copy_from_user lcopy_from_user
-#define copy_to_user lcopy_to_user
-#define __copy_to_user lcopy_to_user
-#define copy_in_user lcopy_in_user
-#define __copy_in_user lcopy_in_user
+unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
+#define __copy_to_user copy_to_user
+unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len);
+#define __copy_from_user copy_from_user
+unsigned long copy_in_user(void __user *dst, const void __user *src, 
unsigned long len); +#define __copy_in_user copy_in_user +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user #endif /* __PARISC_UACCESS_H */