X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fpage.h;h=35e896717fdf9c2eab3113d93cfe75884b78e811;hb=refs%2Fheads%2Fvserver;hp=47ca67f25e54e98193038f10b7869c16db7741b3;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 47ca67f25..35e896717 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -9,7 +9,6 @@
 #ifndef _ASM_PAGE_H
 #define _ASM_PAGE_H
 
-#include <linux/config.h>
 
 #ifdef __KERNEL__
 
@@ -31,10 +30,13 @@
 #define PAGE_SHIFT	16
 #endif
 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#define PAGE_MASK	(~(PAGE_SIZE-1))
+#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
 
 #ifndef __ASSEMBLY__
 
+#include <linux/pfn.h>
+#include <asm/io.h>
+
 extern void clear_page(void * page);
 extern void copy_page(void * to, void * from);
 
@@ -54,76 +56,116 @@ static inline void clear_user_page(void *addr, unsigned long vaddr,
 	extern void (*flush_data_cache_page)(unsigned long addr);
 
 	clear_page(addr);
-	if (pages_do_alias((unsigned long) addr, vaddr))
+	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
 		flush_data_cache_page((unsigned long)addr);
 }
 
-static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *to)
-{
-	extern void (*flush_data_cache_page)(unsigned long addr);
+extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *to);
+struct vm_area_struct;
+extern void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma);
 
-	copy_page(vto, vfrom);
-	if (pages_do_alias((unsigned long)vto, vaddr))
-		flush_data_cache_page((unsigned long)vto);
-}
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
 
 /*
  * These are used to make use of C type-checking.
  */
 #ifdef CONFIG_64BIT_PHYS_ADDR
-typedef struct { unsigned long long pte; } pte_t;
+  #ifdef CONFIG_CPU_MIPS32
+    typedef struct { unsigned long pte_low, pte_high; } pte_t;
+    #define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+    #define __pte(x)	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+  #else
+    typedef struct { unsigned long long pte; } pte_t;
+    #define pte_val(x)	((x).pte)
+    #define __pte(x)	((pte_t) { (x) } )
+  #endif
 #else
 typedef struct { unsigned long pte; } pte_t;
+#define pte_val(x)	((x).pte)
+#define __pte(x)	((pte_t) { (x) } )
 #endif
-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
 
-#define pte_val(x)	((x).pte)
+/*
+ * For 3-level pagetables we define these ourselves; for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+#ifdef CONFIG_64BIT
+
+typedef struct { unsigned long pmd; } pmd_t;
 #define pmd_val(x)	((x).pmd)
-#define pgd_val(x)	((x).pgd)
-#define pgprot_val(x)	((x).pgprot)
+#define __pmd(x)	((pmd_t) { (x) } )
 
-#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+#endif
 
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
+/*
+ * Right now we don't support 4-level pagetables, so all pud-related
+ * definitions come from <asm-generic/pgtable-nopud.h>.
+ */
+
+/*
+ * Finally, the top of the hierarchy: the pgd.
+ */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x)	((x).pgd)
 #define __pgd(x)	((pgd_t) { (x) } )
+
+/*
+ * Manipulate page protection bits.
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
-	int order;
-
-	size = (size-1) >> (PAGE_SHIFT-1);
-	order = -1;
-	do {
-		size >>= 1;
-		order++;
-	} while (size);
-	return order;
-}
+/*
+ * On R4000-style MMUs, where a TLB entry maps an adjacent even/odd pair
+ * of pages, there is only a single global bit per pair of pages.  When
+ * writing to the TLB, make sure we always have the bit set for both pages
+ * or for neither.  This macro is used to access the `buddy' of the pte
+ * we're just working on.
+ */
+#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
 
 #endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-#define __pa(x)		((unsigned long) (x) - PAGE_OFFSET)
-#define __va(x)		((void *)((unsigned long) (x) + PAGE_OFFSET))
+#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
+#define __pa_page_offset(x)	((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
+#else
+#define __pa_page_offset(x)	PAGE_OFFSET
+#endif
+#define __pa(x)		((unsigned long)(x) - __pa_page_offset(x))
+#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
+#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET))
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
-#ifndef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#ifdef CONFIG_FLATMEM
+
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
+
+#elif defined(CONFIG_SPARSEMEM)
+
+/* pfn_valid is defined in linux/mmzone.h */
+
+#elif defined(CONFIG_NEED_MULTIPLE_NODES)
+
+#define pfn_valid(pfn)						\
+({								\
+	unsigned long __pfn = (pfn);				\
+	int __n = pfn_to_nid(__pfn);				\
+	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +	\
+			NODE_DATA(__n)->node_spanned_pages)	\
+		    : 0);					\
+})
+
 #endif
 
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
+#define virt_addr_valid(kaddr)	pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
@@ -131,10 +173,15 @@ static __inline__ int get_order(unsigned long size)
 #define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
 #define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)
 
-#endif /* defined (__KERNEL__) */
-
 #ifdef CONFIG_LIMITED_DMA
 #define WANT_PAGE_VIRTUAL
 #endif
 
+#include <asm-generic/memory_model.h>
+#include <asm-generic/page.h>
+
+#endif /* defined (__KERNEL__) */
+
+#define devmem_is_allowed(x) 1
+
 #endif /* _ASM_PAGE_H */
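
The two least obvious pieces of this patch are the split pte_low/pte_high layout used when CONFIG_64BIT_PHYS_ADDR is enabled on a MIPS32 CPU, and the ptep_buddy() XOR trick for reaching the other pte of the even/odd pair that shares one TLB global bit. The standalone sketch below illustrates both; it is demo code under stated assumptions, not kernel code: the uint32_t halves, the table array, and the test value are inventions of this note (the kernel uses unsigned long fields on the 32-bit target, and its pte tables are page-aligned rather than explicitly aligned with _Alignas).

/*
 * Userspace illustration of the pte packing and ptep_buddy() macros.
 * Build with: cc -std=c11 demo.c
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Demo stand-in for the kernel's pte_t: two 32-bit halves. */
typedef struct { uint32_t pte_low, pte_high; } pte_t;

/* Reassemble the 64-bit software pte from its two 32-bit halves. */
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))

/* Split a 64-bit value into the low/high pair (compound-literal form
 * instead of the kernel's GCC statement expression). */
#define __pte(x) ((pte_t){ (uint32_t)(x), \
			   (uint32_t)(((unsigned long long)(x)) >> 32) })

/*
 * An R4000-style TLB entry maps an even/odd page pair with one shared
 * global bit, so flag updates must touch both ptes.  XORing the pointer
 * with sizeof(pte_t) flips to the other slot of the pair, branch-free.
 */
#define ptep_buddy(x) ((pte_t *)((uintptr_t)(x) ^ sizeof(pte_t)))

int main(void)
{
	/* Round-trip a 36-bit physical address through pack/unpack. */
	unsigned long long phys = 0x123456789aULL;
	pte_t pte = __pte(phys);
	assert(pte_val(pte) == phys);

	/*
	 * Kernel pte tables are page-aligned, which makes the XOR trick
	 * pair ptes by index; force equivalent alignment for this demo
	 * array so the same invariant holds on the stack.
	 */
	_Alignas(2 * sizeof(pte_t)) pte_t table[4];
	assert(ptep_buddy(&table[0]) == &table[1]);
	assert(ptep_buddy(&table[2]) == &table[3]);
	assert(ptep_buddy(&table[3]) == &table[2]);

	printf("pte_val = 0x%llx, buddy pairing OK\n", pte_val(pte));
	return 0;
}

The asserts encode the two invariants the patched macros rely on: packing and unpacking must round-trip any physical address that fits in the two halves, and XOR with sizeof(pte_t) must flip between the two slots of a suitably aligned even/odd pair.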