* 2 of the License, or (at your option) any later version.
*/
+#undef DEBUG
+
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
+#include <linux/signal.h>
#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/naca.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/cputable.h>
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
/*
* Note: pte --> Linux PTE
* HPTE --> PowerPC Hashed Page Table Entry
*
*/
-#ifdef CONFIG_PMAC_DART
+#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
-#endif /* CONFIG_PMAC_DART */
+#endif /* CONFIG_U3_DART */
-HTAB htab_data = {NULL, 0, 0, 0, 0};
+HPTE *htab_address;
+unsigned long htab_hash_mask;
extern unsigned long _SDR1;
;
}
-#ifdef CONFIG_PPC_PSERIES
+#ifdef CONFIG_PPC_MULTIPLATFORM
static inline void create_pte_mapping(unsigned long start, unsigned long end,
unsigned long mode, int large)
{
int ret;
if (large)
- vpn = va >> LARGE_PAGE_SHIFT;
+ vpn = va >> HPAGE_SHIFT;
else
vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, large);
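+	/*
+	 * The masked hash selects a PTE group (PTEG); each group holds
+	 * HPTES_PER_GROUP (8) HPTE slots, so scale by the group size to
+	 * get the index of the group's first slot.
+	 */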
- hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+#ifdef CONFIG_PPC_PSERIES
+ if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
0, mode, 1, large);
else
- ret = pSeries_hpte_insert(hpteg, va,
+#endif /* CONFIG_PPC_PSERIES */
+ ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
0, mode, 1, large);
unsigned long mode_rw;
int i, use_largepages = 0;
+ DBG(" -> htab_initialize()\n");
+
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
*/
- htab_size_bytes = 1UL << naca->pftSize;
+ htab_size_bytes = 1UL << ppc64_pft_size;
pteg_count = htab_size_bytes >> 7;
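+	/*
+	 * Each PTEG is 128 bytes (8 HPTEs of 16 bytes each), hence the
+	 * shift by 7.  For example, 2GB of RAM is 2^19 4K pages, giving
+	 * 2^18 PTEGs and a 32MB hash table.
+	 */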
/* For debug, make the HTAB 1/8 as big as it normally would be. */
htab_size_bytes = pteg_count << 7;
}
- htab_data.htab_num_ptegs = pteg_count;
- htab_data.htab_hash_mask = pteg_count - 1;
+ htab_hash_mask = pteg_count - 1;
- if (systemcfg->platform == PLATFORM_PSERIES ||
- systemcfg->platform == PLATFORM_POWERMAC) {
+ if (systemcfg->platform & PLATFORM_LPAR) {
+ /* Using a hypervisor which owns the htab */
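+		/*
+		 * Entries will be installed through hypervisor calls (see
+		 * pSeries_lpar_hpte_insert() above), so there is no
+		 * htab_address and SDR1 is left at zero.
+		 */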
+ htab_address = NULL;
+ _SDR1 = 0;
+ } else {
/* Find storage for the HPT. Must be contiguous in
* the absolute address space.
*/
table = lmb_alloc(htab_size_bytes, htab_size_bytes);
+
+ DBG("Hash table allocated at %lx, size: %lx\n", table,
+ htab_size_bytes);
+
if ( !table ) {
ppc64_terminate_msg(0x20, "hpt space");
loop_forever();
}
- htab_data.htab = abs_to_virt(table);
+ htab_address = abs_to_virt(table);
/* htab absolute addr + encoded htabsize */
_SDR1 = table + __ilog2(pteg_count) - 11;
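+		/*
+		 * The HTABSIZE field of SDR1 encodes log2(number of PTEGs) - 11,
+		 * i.e. the size relative to the architected minimum hash table
+		 * of 2^11 PTEGs (256KB).
+		 */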
/* Initialize the HPT with no entries */
memset((void *)table, 0, htab_size_bytes);
- } else {
- /* Using a hypervisor which owns the htab */
- htab_data.htab = NULL;
- _SDR1 = 0;
}
mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
use_largepages = 1;
- /* add all physical memory to the bootmem map */
+ /* create the bolted linear mapping in the hash table */
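+	/*
+	 * Where the CPU supports it the mapping uses 16MB pages, which
+	 * needs far fewer bolted HPTEs than covering the same range with
+	 * 4K pages.
+	 */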
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base, size;
base = lmb.memory.region[i].physbase + KERNELBASE;
size = lmb.memory.region[i].size;
-#ifdef CONFIG_PMAC_DART
+ DBG("creating mapping for region: %lx : %lx\n", base, size);
+
+#ifdef CONFIG_U3_DART
/* Do not map the DART space. Fortunately, it will be aligned
* in such a way that it will not cross two lmb regions and will
* fit within a single 16Mb page.
* only use 2Mb of that space. We will use more of it later for
* AGP GART. We have to use a full 16Mb large page.
*/
+ DBG("DART base: %lx\n", dart_tablebase);
+
if (dart_tablebase != 0 && dart_tablebase >= base
&& dart_tablebase < (base + size)) {
if (base != dart_tablebase)
mode_rw, use_largepages);
continue;
}
-#endif /* CONFIG_PMAC_DART */
+#endif /* CONFIG_U3_DART */
create_pte_mapping(base, base + size, mode_rw, use_largepages);
}
+ DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB
-#endif
+#endif /* CONFIG_PPC_MULTIPLATFORM */
/*
* Called by asm hashtable.S for doing lazy icache flush
return pp;
}
-/*
- * Called by asm hashtable.S in case of critical insert failure
+/* Result code is:
+ * 0 - handled
+ * 1 - normal page fault
+ * -1 - critical hash insertion error
*/
-void htab_insert_failure(void)
-{
- panic("hash_page: pte_insert failed\n");
-}
-
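+/*
+ * The assembly caller is expected to simply retry the access when we
+ * return 0, take the normal page fault path on 1 and report the
+ * failure via low_hash_fault() (below) on -1.
+ */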
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
void *pgdir;
int local = 0;
cpumask_t tmp;
- /* Check for invalid addresses. */
- if (!IS_VALID_EA(ea))
- return 1;
-
switch (REGION_ID(ea)) {
case USER_REGION_ID:
user_region = 1;
mm = current->mm;
- if (mm == NULL)
+ if ((ea > USER_END) || (!mm))
return 1;
vsid = get_vsid(mm->context.id, ea);
break;
case IO_REGION_ID:
+ if (ea > IMALLOC_END)
+ return 1;
mm = &ioremap_mm;
vsid = get_kernel_vsid(ea);
break;
case VMALLOC_REGION_ID:
+ if (ea > VMALLOC_END)
+ return 1;
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
#if 0
- case EEH_REGION_ID:
- /*
- * Should only be hit if there is an access to MMIO space
- * which is protected by EEH.
- * Send the problem up to do_page_fault
- */
case KERNEL_REGION_ID:
/*
* Should never get here - entire 0xC0... region is bolted.
int local)
{
unsigned long vsid, vpn, va, hash, secondary, slot;
-
- /* XXX fix for large ptes */
- unsigned long large = 0;
+ unsigned long huge = pte_huge(pte);
if ((ea >= USER_START) && (ea <= USER_END))
vsid = get_vsid(context, ea);
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff);
- if (large)
- vpn = va >> LARGE_PAGE_SHIFT;
+ if (huge)
+ vpn = va >> HPAGE_SHIFT;
else
vpn = va >> PAGE_SHIFT;
- hash = hpt_hash(vpn, large);
+ hash = hpt_hash(vpn, huge);
secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
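+	/*
+	 * _PAGE_SECONDARY records that the HPTE was inserted in the
+	 * secondary group, which is located with the complement of the
+	 * primary hash.
+	 */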
if (secondary)
hash = ~hash;
- slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
- ppc_md.hpte_invalidate(slot, va, large, local);
+ ppc_md.hpte_invalidate(slot, va, huge, local);
}
void flush_hash_range(unsigned long context, unsigned long number, int local)
(unsigned long)insn_addr);
}
+/*
+ * low_hash_fault is called when the low-level hash code failed to
+ * insert a PTE due to a hypervisor error
+ */
+void low_hash_fault(struct pt_regs *regs, unsigned long address)
+{
+ if (user_mode(regs)) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
+ return;
+ }
+ bad_page_fault(regs, address, SIGBUS);
+}
+
void __init htab_finish_init(void)
{
extern unsigned int *htab_call_hpte_insert1;