#undef DEBUG
#undef DEBUG_LOW
+#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
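
/*
 * htab_hash_mask reduces a computed hash to a PTE-group index in the
 * hash table; the mmu_*_psize variables are indices into
 * mmu_psize_defs[] selecting a page size for each class of mapping.
 */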
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
-int mmu_vmalloc_psize = MMU_PAGE_4K;
-int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
-#ifdef CONFIG_PPC_64K_PAGES
-int mmu_ci_restrictions;
-#endif
/* These are definitions of page-size arrays, to be used when none
 * is provided by the firmware.
hash = hpt_hash(va, shift);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
-
- BUG_ON(!ppc_md.hpte_insert);
- ret = ppc_md.hpte_insert(hpteg, va, paddr,
- tmp_mode, HPTE_V_BOLTED, psize);
-
+	/* The crap below can be cleaned up once ppc_md.probe() can
+	 * set up the hash callbacks, so that we can just use the
+	 * normal insert callback here.
+	 */
+#ifdef CONFIG_PPC_ISERIES
+ if (_machine == PLATFORM_ISERIES_LPAR)
+ ret = iSeries_hpte_insert(hpteg, va,
+ __pa(vaddr),
+ tmp_mode,
+ HPTE_V_BOLTED,
+ psize);
+ else
+#endif
+#ifdef CONFIG_PPC_PSERIES
+ if (_machine & PLATFORM_LPAR)
+ ret = pSeries_lpar_hpte_insert(hpteg, va,
+ virt_to_abs(paddr),
+ tmp_mode,
+ HPTE_V_BOLTED,
+ psize);
+ else
+#endif
+#ifdef CONFIG_PPC_MULTIPLATFORM
+ ret = native_hpte_insert(hpteg, va,
+ virt_to_abs(paddr),
+ tmp_mode, HPTE_V_BOLTED,
+ psize);
+#endif
if (ret < 0)
break;
}
	 * Not in the device-tree; fall back on the known size
	 * list for 16M-capable GP & GR
*/
- if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+ if ((_machine != PLATFORM_ISERIES_LPAR) &&
+ cpu_has_feature(CPU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
-#ifdef CONFIG_PPC_64K_PAGES
/*
	 * Pick a size for the ordinary pages. Default is 4K; we support
- * 64K for user mappings and vmalloc if supported by the processor.
- * We only use 64k for ioremap if the processor
- * (and firmware) support cache-inhibited large pages.
- * If not, we use 4k and set mmu_ci_restrictions so that
- * hash_page knows to switch processes that use cache-inhibited
- * mappings to 4k pages.
+	 * 64K if cache-inhibited large pages are supported by the
+	 * processor.
*/
- if (mmu_psize_defs[MMU_PAGE_64K].shift) {
+#ifdef CONFIG_PPC_64K_PAGES
+ if (mmu_psize_defs[MMU_PAGE_64K].shift &&
+ cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
mmu_virtual_psize = MMU_PAGE_64K;
- mmu_vmalloc_psize = MMU_PAGE_64K;
- if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
- mmu_io_psize = MMU_PAGE_64K;
- else
- mmu_ci_restrictions = 1;
- }
#endif
- printk(KERN_DEBUG "Page orders: linear mapping = %d, "
- "virtual = %d, io = %d\n",
+ printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
mmu_psize_defs[mmu_linear_psize].shift,
- mmu_psize_defs[mmu_virtual_psize].shift,
- mmu_psize_defs[mmu_io_psize].shift);
+ mmu_psize_defs[mmu_virtual_psize].shift);
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
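	/* Bolt a kernel read-write linear mapping for the hot-added section */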
- BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+ BUG_ON(htab_bolt_mapping(start, end, start,
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
- unsigned long funcp = *((unsigned long *)func);
- int offset = funcp - (unsigned long)insn_addr;
-
- *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
- flush_icache_range((unsigned long)insn_addr, 4+
- (unsigned long)insn_addr);
-}
-
-static void __init htab_finish_init(void)
-{
- extern unsigned int *htab_call_hpte_insert1;
- extern unsigned int *htab_call_hpte_insert2;
- extern unsigned int *htab_call_hpte_remove;
- extern unsigned int *htab_call_hpte_updatepp;
-
-#ifdef CONFIG_PPC_64K_PAGES
- extern unsigned int *ht64_call_hpte_insert1;
- extern unsigned int *ht64_call_hpte_insert2;
- extern unsigned int *ht64_call_hpte_remove;
- extern unsigned int *ht64_call_hpte_updatepp;
-
- make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
-#endif /* CONFIG_PPC_64K_PAGES */
-
- make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
- make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
- make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
-}
-
void __init htab_initialize(void)
{
unsigned long table;
htab_hash_mask = pteg_count - 1;
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (platform_is_lpar()) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
* the absolute address space.
*/
table = lmb_alloc(htab_size_bytes, htab_size_bytes);
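+	/* lmb_alloc() returns 0 on failure, so catch that early */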
+ BUG_ON(table == 0);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
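		/* Do not bolt the 16MB DART window: map the memory on either
		 * side of it and leave the DART table itself unmapped. */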
if (dart_tablebase != 0 && dart_tablebase >= base
&& dart_tablebase < (base + size)) {
- unsigned long dart_table_end = dart_tablebase + 16 * MB;
if (base != dart_tablebase)
BUG_ON(htab_bolt_mapping(base, dart_tablebase,
- __pa(base), mode_rw,
- mmu_linear_psize));
- if ((base + size) > dart_table_end)
+ base, mode_rw,
+ mmu_linear_psize));
+ if ((base + size) > (dart_tablebase + 16*MB))
BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
- base + size,
- __pa(dart_table_end),
+ base + size,
+ dart_tablebase+16*MB,
mode_rw,
mmu_linear_psize));
continue;
}
#endif /* CONFIG_U3_DART */
- BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
- mode_rw, mmu_linear_psize));
+ BUG_ON(htab_bolt_mapping(base, base + size, base,
+ mode_rw, mmu_linear_psize));
}
/*
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
- BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
- __pa(tce_alloc_start), mode_rw,
+ BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+ tce_alloc_start, mode_rw,
mmu_linear_psize));
}
- htab_finish_init();
-
DBG(" <- htab_initialize()\n");
}
#undef KB
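
/* Secondary CPUs point SDR1 at the hash table themselves; when a
 * hypervisor owns the htab there is nothing to do. */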
void htab_initialize_secondary(void)
{
- if (!firmware_has_feature(FW_FEATURE_LPAR))
+ if (!platform_is_lpar())
mtspr(SPRN_SDR1, _SDR1);
}
pte_t *ptep;
cpumask_t tmp;
int rc, user_region = 0, local = 0;
- int psize;
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
return 1;
}
vsid = get_vsid(mm->context.id, ea);
- psize = mm->context.user_psize;
break;
case VMALLOC_REGION_ID:
mm = &init_mm;
vsid = get_kernel_vsid(ea);
- if (ea < VMALLOC_END)
- psize = mmu_vmalloc_psize;
- else
- psize = mmu_io_psize;
break;
default:
/* Not a valid range
#ifndef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_ci_restrictions) {
- /* If this PTE is non-cacheable, switch to 4k */
- if (psize == MMU_PAGE_64K &&
- (pte_val(*ptep) & _PAGE_NO_CACHE)) {
- if (user_region) {
- psize = MMU_PAGE_4K;
- mm->context.user_psize = MMU_PAGE_4K;
- mm->context.sllp = SLB_VSID_USER |
- mmu_psize_defs[MMU_PAGE_4K].sllp;
- } else if (ea < VMALLOC_END) {
- /*
- * some driver did a non-cacheable mapping
- * in vmalloc space, so switch vmalloc
- * to 4k pages
- */
- printk(KERN_ALERT "Reducing vmalloc segment "
- "to 4kB pages because of "
- "non-cacheable mapping\n");
- psize = mmu_vmalloc_psize = MMU_PAGE_4K;
- }
- }
- if (user_region) {
- if (psize != get_paca()->context.user_psize) {
- get_paca()->context = mm->context;
- slb_flush_and_rebolt();
- }
- } else if (get_paca()->vmalloc_sllp !=
- mmu_psize_defs[mmu_vmalloc_psize].sllp) {
- get_paca()->vmalloc_sllp =
- mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_flush_and_rebolt();
- }
- }
- if (psize == MMU_PAGE_64K)
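+	/* Pick the hash insert path from the global kernel page size */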
+ if (mmu_virtual_psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
else
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#ifndef CONFIG_PPC_64K_PAGES
__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_ci_restrictions) {
- /* If this PTE is non-cacheable, switch to 4k */
- if (mm->context.user_psize == MMU_PAGE_64K &&
- (pte_val(*ptep) & _PAGE_NO_CACHE)) {
- mm->context.user_psize = MMU_PAGE_4K;
- mm->context.sllp = SLB_VSID_USER |
- mmu_psize_defs[MMU_PAGE_4K].sllp;
- get_paca()->context = mm->context;
- slb_flush_and_rebolt();
- }
- }
- if (mm->context.user_psize == MMU_PAGE_64K)
+ if (mmu_virtual_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
else
__hash_page_4K(ea, access, vsid, ptep, trap, local);
}
}
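+
+/*
+ * Patch the instruction at insn_addr into "bl func".  On ppc64 a
+ * function pointer refers to a function descriptor, so the real entry
+ * point is read from the first word of *func.  0x48000001 is the
+ * I-form branch opcode with the link bit set; the displacement lives
+ * in the bits covered by 0x03fffffc and must be within +/-32MB of the
+ * call site.
+ */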
+static inline void make_bl(unsigned int *insn_addr, void *func)
+{
+ unsigned long funcp = *((unsigned long *)func);
+ int offset = funcp - (unsigned long)insn_addr;
+
+ *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
+	flush_icache_range((unsigned long)insn_addr,
+			   (unsigned long)insn_addr + 4);
+}
+
/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
}
bad_page_fault(regs, address, SIGBUS);
}
+
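+/*
+ * Wire the low-level hash assembly to the platform hpte callbacks by
+ * patching direct "bl" calls at the exported call sites, which avoids
+ * an indirect call through ppc_md on the hash fault path.
+ */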
+void __init htab_finish_init(void)
+{
+ extern unsigned int *htab_call_hpte_insert1;
+ extern unsigned int *htab_call_hpte_insert2;
+ extern unsigned int *htab_call_hpte_remove;
+ extern unsigned int *htab_call_hpte_updatepp;
+
+#ifdef CONFIG_PPC_64K_PAGES
+ extern unsigned int *ht64_call_hpte_insert1;
+ extern unsigned int *ht64_call_hpte_insert2;
+ extern unsigned int *ht64_call_hpte_remove;
+ extern unsigned int *ht64_call_hpte_updatepp;
+
+ make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+ make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+ make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
+ make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
+ make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
+ make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+}