X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ppc64%2Fpgtable.h;h=0f6990ad957d6ad599d70a55ccb3e5ed404b33cb;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=5a4a18043ae1f44ac2f60ea27c2016eb037e3434;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 5a4a18043..0f6990ad9 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -1,6 +1,8 @@
 #ifndef _PPC64_PGTABLE_H
 #define _PPC64_PGTABLE_H
 
+#include <asm-generic/4level-fixup.h>
+
 /*
  * This file contains the functions and defines necessary to modify and use
  * the ppc64 hashed page table.
@@ -66,12 +68,6 @@
 #define IMALLOC_BASE	(0xE000000080000000ul)
 #define IMALLOC_END	(IMALLOC_BASE + PGTABLE_EA_MASK)
 
-/*
- * Define the address range mapped virt <-> physical
- */
-#define KRANGE_START	KERNELBASE
-#define KRANGE_END	(KRANGE_START + PGTABLE_EA_MASK)
-
 /*
  * Define the user address range
  */
@@ -98,6 +94,7 @@
 #define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */ 
 #define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
 #define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
+#define _PAGE_HUGE	0x10000 /* 16MB page */
 /* Bits 0x7000 identify the index within an HPT Group */
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
 /* PAGE_MASK gives the right answer below, but only by accident */
@@ -157,19 +154,19 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #endif /* __ASSEMBLY__ */
 
 /* shift to put page number into pte */
-#define PTE_SHIFT	(16)
+#define PTE_SHIFT	(17)
 
 /* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
  * to give the PTE page number.  The bottom two bits are for flags. */
 #define PMD_TO_PTEPAGE_SHIFT	(2)
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define _PMD_HUGEPAGE	0x00000001U
-#define HUGEPTE_BATCH_SIZE	(1<<(HPAGE_SHIFT-PMD_SHIFT))
 
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		   unsigned long ea, unsigned long vsid, int local);
+
+void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -177,7 +174,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)	-1
-#define _PMD_HUGEPAGE	0
+#define hugetlb_mm_free_pgd(mm)	do {} while (0)
 
 #endif
 
@@ -213,10 +210,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 #define pmd_set(pmdp, ptep) \
 	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_hugepage(pmd)	(!!(pmd_val(pmd) & _PMD_HUGEPAGE))
-#define pmd_bad(pmd)		(((pmd_val(pmd)) == 0) || pmd_hugepage(pmd))
-#define pmd_present(pmd)	((!pmd_hugepage(pmd)) \
-				 && (pmd_val(pmd) & ~_PMD_HUGEPAGE) != 0)
+#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
+#define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
 #define pmd_page_kernel(pmd)	\
 	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
@@ -269,6 +264,7 @@ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE;}
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -294,6 +290,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 	pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+	pte_val(pte) |= _PAGE_HUGE; return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(pte_t *p, unsigned long clr)
@@ -464,6 +462,10 @@ extern pgd_t ioremap_dir[1024];
 
 extern void paging_init(void);
 
+struct mmu_gather;
+void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
+			   unsigned long start, unsigned long end);
+
 /*
  * This gets called at the end of handling a page fault, when
  * the kernel has put a new PTE into the page table for the process.
@@ -492,7 +494,8 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr)	(1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 void pgtable_cache_init(void);
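The common thread in the hunks above is moving the hugepage marker out of the PMD (the deleted _PMD_HUGEPAGE bit, with its special cases in pmd_bad() and pmd_present()) and into the PTE itself as _PAGE_HUGE; raising PTE_SHIFT from 16 to 17 is what frees software bit 0x10000 for the new flag, and pte_huge()/pte_mkhuge() become the accessors for testing and setting it. The snippet below is a minimal stand-alone model of that flag handling, not kernel code: the flag value and the two accessors are taken from the hunks, while the pte_t wrapper and main() are scaffolding added so it compiles and runs in user space.

/* Stand-alone model of the _PAGE_HUGE handling added by this patch.
 * _PAGE_HUGE, pte_huge() and pte_mkhuge() mirror the hunks above;
 * pte_t here is a stand-in struct, not the kernel's real type. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_HUGE 0x10000 /* 16MB page, as in the new software-bit block */

typedef struct { unsigned long pte; } pte_t; /* illustration-only stand-in */
#define pte_val(x) ((x).pte)

static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
static inline pte_t pte_mkhuge(pte_t pte) {
	pte_val(pte) |= _PAGE_HUGE; return pte; }

int main(void)
{
	pte_t pte = { 0 };

	assert(!pte_huge(pte));    /* flag starts clear */
	pte = pte_mkhuge(pte);     /* set it, as hugetlb setup code would */
	assert(pte_huge(pte));     /* and it reads back from the same PTE */
	printf("pte flags: %#lx\n", pte_val(pte));
	return 0;
}

Note that pte_mkhuge(), like the kernel's other pte_mk*() helpers, takes the PTE by value and returns the modified copy. The final hunk is independent of the hugepage work: io_remap_page_range() becomes a wrapper around remap_pfn_range(), converting the physical address to a page frame number with (paddr) >> PAGE_SHIFT to match the pfn-based API.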