X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fhash_utils_64.c;h=c006d9039633dc75f020a71da32ce254c619c937;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=1915661c2c817b65944877d4943b688cc273cc1d;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1915661c2..c006d9039 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -21,6 +21,7 @@
 #undef DEBUG
 #undef DEBUG_LOW
 
+#include 
 #include 
 #include 
 #include 
@@ -91,15 +92,10 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
-int mmu_vmalloc_psize = MMU_PAGE_4K;
-int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
-#ifdef CONFIG_PPC_64K_PAGES
-int mmu_ci_restrictions;
-#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -166,12 +162,34 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		hash = hpt_hash(va, shift);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
-
-		BUG_ON(!ppc_md.hpte_insert);
-		ret = ppc_md.hpte_insert(hpteg, va, paddr,
-				tmp_mode, HPTE_V_BOLTED, psize);
-
+		/* The crap below can be cleaned once ppd_md.probe() can
+		 * set up the hash callbacks, thus we can just used the
+		 * normal insert callback here.
+		 */
+#ifdef CONFIG_PPC_ISERIES
+		if (machine_is(iseries))
+			ret = iSeries_hpte_insert(hpteg, va,
+						  paddr,
+						  tmp_mode,
+						  HPTE_V_BOLTED,
+						  psize);
+		else
+#endif
+#ifdef CONFIG_PPC_PSERIES
+		if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
+			ret = pSeries_lpar_hpte_insert(hpteg, va,
+						       paddr,
+						       tmp_mode,
+						       HPTE_V_BOLTED,
+						       psize);
+		else
+#endif
+#ifdef CONFIG_PPC_MULTIPLATFORM
+			ret = native_hpte_insert(hpteg, va,
+						 paddr,
+						 tmp_mode, HPTE_V_BOLTED,
+						 psize);
+#endif
 		if (ret < 0)
 			break;
 	}
@@ -290,31 +308,20 @@ static void __init htab_init_page_sizes(void)
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
 
-#ifdef CONFIG_PPC_64K_PAGES
 	/*
 	 * Pick a size for the ordinary pages. Default is 4K, we support
-	 * 64K for user mappings and vmalloc if supported by the processor.
-	 * We only use 64k for ioremap if the processor
-	 * (and firmware) support cache-inhibited large pages.
-	 * If not, we use 4k and set mmu_ci_restrictions so that
-	 * hash_page knows to switch processes that use cache-inhibited
-	 * mappings to 4k pages.
+	 * 64K if cache inhibited large pages are supported by the
+	 * processor
 	 */
-	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
+#ifdef CONFIG_PPC_64K_PAGES
+	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
+	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
 		mmu_virtual_psize = MMU_PAGE_64K;
-		mmu_vmalloc_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
-			mmu_io_psize = MMU_PAGE_64K;
-		else
-			mmu_ci_restrictions = 1;
-	}
 #endif
 
-	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-	       "virtual = %d, io = %d\n",
+	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
-	       mmu_psize_defs[mmu_virtual_psize].shift,
-	       mmu_psize_defs[mmu_io_psize].shift);
+	       mmu_psize_defs[mmu_virtual_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
@@ -390,41 +397,6 @@ void create_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
-	unsigned long funcp = *((unsigned long *)func);
-	int offset = funcp - (unsigned long)insn_addr;
-
-	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
-	flush_icache_range((unsigned long)insn_addr, 4+
-			   (unsigned long)insn_addr);
-}
-
-static void __init htab_finish_init(void)
-{
-	extern unsigned int *htab_call_hpte_insert1;
-	extern unsigned int *htab_call_hpte_insert2;
-	extern unsigned int *htab_call_hpte_remove;
-	extern unsigned int *htab_call_hpte_updatepp;
-
-#ifdef CONFIG_PPC_64K_PAGES
-	extern unsigned int *ht64_call_hpte_insert1;
-	extern unsigned int *ht64_call_hpte_insert2;
-	extern unsigned int *ht64_call_hpte_remove;
-	extern unsigned int *ht64_call_hpte_updatepp;
-
-	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
-	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
-	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
-	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
-#endif /* CONFIG_PPC_64K_PAGES */
-
-	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
-	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
-	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
-	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
-}
-
 void __init htab_initialize(void)
 {
 	unsigned long table;
@@ -537,8 +509,6 @@ void __init htab_initialize(void)
 					 mmu_linear_psize));
 	}
 
-	htab_finish_init();
-
 	DBG(" <- htab_initialize()\n");
 }
 #undef KB
@@ -586,7 +556,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
-	int psize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -606,15 +575,10 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
-		psize = mm->context.user_psize;
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
-		if (ea < VMALLOC_END)
-			psize = mmu_vmalloc_psize;
-		else
-			psize = mmu_io_psize;
 		break;
 	default:
 		/* Not a valid range
@@ -665,40 +629,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
 	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
-			if (user_region) {
-				psize = MMU_PAGE_4K;
-				mm->context.user_psize = MMU_PAGE_4K;
-				mm->context.sllp = SLB_VSID_USER |
-					mmu_psize_defs[MMU_PAGE_4K].sllp;
-			} else if (ea < VMALLOC_END) {
-				/*
-				 * some driver did a non-cacheable mapping
-				 * in vmalloc space, so switch vmalloc
-				 * to 4k pages
-				 */
-				printk(KERN_ALERT "Reducing vmalloc segment "
-				       "to 4kB pages because of "
-				       "non-cacheable mapping\n");
-				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-			}
-		}
-		if (user_region) {
-			if (psize != get_paca()->context.user_psize) {
-				get_paca()->context = mm->context;
-				slb_flush_and_rebolt();
-			}
-		} else if (get_paca()->vmalloc_sllp !=
-			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-			get_paca()->vmalloc_sllp =
-				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
-		}
-	}
-	if (psize == MMU_PAGE_64K)
+	if (mmu_virtual_psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -750,18 +681,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
 	__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (mm->context.user_psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
-			mm->context.user_psize = MMU_PAGE_4K;
-			mm->context.sllp = SLB_VSID_USER |
-				mmu_psize_defs[MMU_PAGE_4K].sllp;
-			get_paca()->context = mm->context;
-			slb_flush_and_rebolt();
-		}
-	}
-	if (mm->context.user_psize == MMU_PAGE_64K)
+	if (mmu_virtual_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		__hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -801,6 +721,16 @@ void flush_hash_range(unsigned long number, int local)
 	}
 }
 
+static inline void make_bl(unsigned int *insn_addr, void *func)
+{
+	unsigned long funcp = *((unsigned long *)func);
+	int offset = funcp - (unsigned long)insn_addr;
+
+	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
+	flush_icache_range((unsigned long)insn_addr, 4+
+			   (unsigned long)insn_addr);
+}
+
 /*
  * low_hash_fault is called when we the low level hash code failed
  * to instert a PTE due to an hypervisor error
@@ -819,3 +749,28 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 	}
 	bad_page_fault(regs, address, SIGBUS);
 }
+
+void __init htab_finish_init(void)
+{
+	extern unsigned int *htab_call_hpte_insert1;
+	extern unsigned int *htab_call_hpte_insert2;
+	extern unsigned int *htab_call_hpte_remove;
+	extern unsigned int *htab_call_hpte_updatepp;
+
+#ifdef CONFIG_PPC_64K_PAGES
+	extern unsigned int *ht64_call_hpte_insert1;
+	extern unsigned int *ht64_call_hpte_insert2;
+	extern unsigned int *ht64_call_hpte_remove;
+	extern unsigned int *ht64_call_hpte_updatepp;
+
+	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
+	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
+	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
+	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+}