X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ia64%2Fuaccess.h;h=449c8c0fa2bdc65165ff62248fdb4dbf8cba129e;hb=refs%2Fheads%2Fvserver;hp=68f0e1deefe8fd57617c779c6770802aee5b4ed1;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
index 68f0e1dee..449c8c0fa 100644
--- a/include/asm-ia64/uaccess.h
+++ b/include/asm-ia64/uaccess.h
@@ -35,9 +35,12 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
 
 #include <asm/intrinsics.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 /*
  * For historical reasons, the following macros are grossly misnamed:
@@ -69,12 +72,6 @@
 })
 #define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
 
-static inline int
-verify_area (int type, const void __user *addr, unsigned long size)
-{
-	return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -132,7 +129,7 @@ extern long __get_user_unaligned_unknown (void);
 
 #ifdef ASM_SUPPORTED
   struct __large_struct { unsigned long buf[100]; };
-# define __m(x) (*(struct __large_struct *)(x))
+# define __m(x) (*(struct __large_struct __user *)(x))
 
 /* We need to declare the __ex_table section before we can use it in .xdata.  */
 asm (".section \"__ex_table\", \"a\"\n\t.previous");
@@ -190,8 +187,8 @@ extern void __get_user_unknown (void);
 ({									\
 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
 	__typeof__ (size) __gu_size = (size);				\
-	long __gu_err = -EFAULT, __gu_val = 0;				\
-									\
+	long __gu_err = -EFAULT;					\
+	unsigned long __gu_val = 0;					\
 	if (!check || __access_ok(__gu_ptr, size, segment))		\
 		switch (__gu_size) {					\
 		      case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
@@ -243,13 +240,13 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-	return __copy_user(to, (void __user *) from, count);
+	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-	return __copy_user((void __user *) to, from, count);
+	return __copy_user((__force void __user *) to, from, count);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -261,7 +258,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	long __cu_len = (n);						\
 									\
 	if (__access_ok(__cu_to, __cu_len, get_fs()))			\
-		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
 	__cu_len;							\
 })
 
@@ -273,7 +270,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 									\
 	__chk_user_ptr(__cu_from);					\
 	if (__access_ok(__cu_from, __cu_len, get_fs()))			\
-		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
+		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
 	__cu_len;							\
 })
 
@@ -367,4 +364,38 @@ ia64_done_with_exception (struct pt_regs *regs)
 	return 0;
 }
 
+#define ARCH_HAS_TRANSLATE_MEM_PTR 1
+static __inline__ char *
+xlate_dev_mem_ptr (unsigned long p)
+{
+	struct page *page;
+	char * ptr;
+
+	page = pfn_to_page(p >> PAGE_SHIFT);
+	if (PageUncached(page))
+		ptr = (char *)p + __IA64_UNCACHED_OFFSET;
+	else
+		ptr = __va(p);
+
+	return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ char *
+xlate_dev_kmem_ptr (char * p)
+{
+	struct page *page;
+	char * ptr;
+
+	page = virt_to_page((unsigned long)p);
+	if (PageUncached(page))
+		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
+	else
+		ptr = p;
+
+	return ptr;
+}
+
 #endif /* _ASM_IA64_UACCESS_H */
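
Note on the __force casts introduced above: __copy_user() is declared with two
__user arguments, so whichever side of the copy is a plain kernel pointer has
to be cast into the user address space. Annotating that cast with __force tells
sparse ("make C=1") that the address-space conversion is intentional, where the
old bare (void __user *) casts would draw an address-space warning. As a point
of reference, the fragment below is a minimal sketch of how driver code
consumes the interfaces this header backs; demo_param and demo_set_param are
made-up names, not part of the patch.

/*
 * Hypothetical caller of the uaccess API declared in this header.
 */
#include <asm/uaccess.h>	/* copy_from_user(), put_user() */
#include <linux/errno.h>

struct demo_param {
	unsigned long value;
};

static long
demo_set_param (struct demo_param __user *uptr, unsigned long *out)
{
	struct demo_param p;

	/* copy_from_user() returns the number of bytes it could NOT copy. */
	if (copy_from_user(&p, uptr, sizeof(p)))
		return -EFAULT;

	*out = p.value;

	/* put_user() returns 0 on success or -EFAULT on a faulting address. */
	return put_user(p.value, &uptr->value);
}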
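
The newly added xlate_dev_mem_ptr()/xlate_dev_kmem_ptr() helpers (together
with the ARCH_HAS_TRANSLATE_MEM_PTR marker) let /dev/mem-style accesses pick a
kernel mapping whose cache attribute matches the page, since mixing cached and
uncached references to the same physical memory can trigger attribute-aliasing
machine checks on ia64. The sketch below shows roughly how such a hook is
consumed by a read path like the one in drivers/char/mem.c; demo_read_phys()
is a made-up name and the usual range and permission checks are omitted.

static ssize_t
demo_read_phys (unsigned long p, char __user *buf, size_t count)
{
	/* Pick the cached or uncached kernel alias for physical address p. */
	char *ptr = xlate_dev_mem_ptr(p);

	if (copy_to_user(buf, ptr, count))
		return -EFAULT;
	return count;
}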