X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fppc%2Fmm%2Fpgtable.c;h=354a9408f02436b83ac8204d8a854686c675e264;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=b1b93fc18d4d9af5546bb8aaf0adb0f94b9b126a;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index b1b93fc18..354a9408f 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -20,8 +20,8 @@
  *
  */
 
-#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -38,10 +38,14 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 int io_bat_index;
 
-#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#if defined(CONFIG_6xx)
 #define HAVE_BATS	1
 #endif
 
+#if defined(CONFIG_FSL_BOOKE)
+#define HAVE_TLBCAM	1
+#endif
+
 extern char etext[], _stext[];
 
 #ifdef CONFIG_SMP
@@ -59,7 +63,16 @@ void setbat(int index, unsigned long virt, unsigned long phys,
 #define p_mapped_by_bats(x)	(0UL)
 #endif /* HAVE_BATS */
 
-#ifdef CONFIG_44x
+#ifdef HAVE_TLBCAM
+extern unsigned int tlbcam_index;
+extern unsigned long v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+#else /* !HAVE_TLBCAM */
+#define v_mapped_by_tlbcam(x)	(0UL)
+#define p_mapped_by_tlbcam(x)	(0UL)
+#endif /* HAVE_TLBCAM */
+
+#ifdef CONFIG_PTE_64BIT
 /* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
@@ -70,8 +83,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret;
 
-	if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
-		clear_pages(ret, PGDIR_ORDER);
+	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
 	return ret;
 }
 
@@ -86,29 +98,30 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	extern int mem_init_done;
 	extern void *early_get_page(void);
 
-	if (mem_init_done)
-		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	else
+	if (mem_init_done) {
+		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	} else {
 		pte = (pte_t *)early_get_page();
-	if (pte)
-		clear_page(pte);
+		if (pte)
+			clear_page(pte);
+	}
 	return pte;
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *pte;
+	struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
 #else
-	int flags = GFP_KERNEL | __GFP_REPEAT;
+	gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
 #endif
 
-	pte = alloc_pages(flags, 0);
-	if (pte)
-		clear_highpage(pte);
-	return pte;
+	ptepage = alloc_pages(flags, 0);
+	if (ptepage)
+		clear_highpage(ptepage);
+	return ptepage;
 }
 
 void pte_free_kernel(pte_t *pte)
@@ -119,37 +132,37 @@ void pte_free_kernel(pte_t *pte)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
 #endif
 	free_page((unsigned long)pte);
 }
 
-void pte_free(struct page *pte)
+void pte_free(struct page *ptepage)
 {
 #ifdef CONFIG_SMP
 	hash_page_sync();
 #endif
-	__free_page(pte);
+	__free_page(ptepage);
 }
 
-#ifndef CONFIG_44x
-void *
+#ifndef CONFIG_PHYS_64BIT
+void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
 	return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
-#else /* CONFIG_44x */
-void *
+#else /* CONFIG_PHYS_64BIT */
+void __iomem *
 ioremap64(unsigned long long addr, unsigned long size)
 {
 	return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
 
-void *
+void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
 	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
 	return ioremap64(addr64, size);
 }
-#endif /* CONFIG_44x */
+#endif /* CONFIG_PHYS_64BIT */
 
-void *
+void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
 	unsigned long v, i;
@@ -178,7 +191,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	 */
 	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
 	{
-		printk("__ioremap(): phys addr "PTE_FMT" is RAM lr %p\n", p,
+		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
 		       __builtin_return_address(0));
 		return NULL;
 	}
@@ -200,6 +213,9 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
 		goto out;
 
+	if ((v = p_mapped_by_tlbcam(p)))
+		goto out;
+
 	if (mem_init_done) {
 		struct vm_struct *area;
 		area = get_vm_area(size, VM_IOREMAP);
@@ -229,10 +245,10 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 	}
 
 out:
-	return (void *) (v + ((unsigned long)addr & ~PAGE_MASK));
+	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
 }
 
-void iounmap(void *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	/*
 	 * If mapped by BATs then there is nothing to do.
@@ -244,6 +260,18 @@ void iounmap(void *addr)
 	vunmap((void *) (PAGE_MASK & (unsigned long)addr));
 }
 
+void __iomem *ioport_map(unsigned long port, unsigned int len)
+{
+	return (void __iomem *) (port + _IO_BASE);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
 int
 map_page(unsigned long va, phys_addr_t pa, int flags)
 {
@@ -251,18 +279,16 @@ map_page(unsigned long va, phys_addr_t pa, int flags)
 	pte_t *pg;
 	int err = -ENOMEM;
 
-	spin_lock(&init_mm.page_table_lock);
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pgd_offset_k(va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
-	pg = pte_alloc_kernel(&init_mm, pd, va);
+	pg = pte_alloc_kernel(pd, va);
 	if (pg != 0) {
 		err = 0;
-		set_pte(pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
 		if (mem_init_done)
 			flush_HPTE(0, va, pmd_val(*pd));
 	}
-	spin_unlock(&init_mm.page_table_lock);
 	return err;
 }
 
@@ -290,6 +316,9 @@ void __init mapin_ram(void)
 /* is x a power of 2? */
 #define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
 
+/* is x a power of 4? */
+#define is_power_of_4(x)	((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
+
 /*
  * Set up a mapping for a block of I/O.
  * virt, phys, size must all be page-aligned.
@@ -315,6 +344,18 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
 	}
 #endif /* HAVE_BATS */
 
+#ifdef HAVE_TLBCAM
+	/*
+	 * Use a CAM for this if possible...
+	 */
+	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
+	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
+		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
+		++tlbcam_index;
+		return;
+	}
+#endif /* HAVE_TLBCAM */
+
 	/* No BATs available, put it in the page tables. */
 	for (i = 0; i < size; i += PAGE_SIZE)
 		map_page(virt + i, phys + i, flags);
@@ -326,7 +367,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -341,6 +382,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -378,7 +421,7 @@ unsigned long iopa(unsigned long addr)
 	mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}