#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/abs_addr.h>
+#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
{
unsigned long addr;
unsigned int step;
+ unsigned long tmp_mode;
if (large)
step = 16*MB;
else
step = 4*KB;

if (large)
vpn = va >> HPAGE_SHIFT;
else
vpn = va >> PAGE_SHIFT;
+
+ tmp_mode = mode;
+
+ /* Make non-kernel text non-executable */
+ if (!in_kernel_text(addr))
+ tmp_mode = mode | HW_NO_EXEC;
+
hash = hpt_hash(vpn, large);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, mode, 1, large);
+ 0, tmp_mode, 1, large);
else
#endif /* CONFIG_PPC_PSERIES */
ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, mode, 1, large);
+ 0, tmp_mode, 1, large);
if (ret == -1) {
ppc64_terminate_msg(0x20, "create_pte_mapping");
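For reference, the in_kernel_text() check above is what the new <asm/sections.h> include is for. A minimal sketch, assuming it is a simple bounds test against the linker-provided section symbols (the symbol names and header placement are assumptions, not taken from this patch):

/*
 * Hedged sketch of in_kernel_text(): treat everything between the
 * _stext and __init_end linker symbols (exported via
 * <asm/sections.h>) as kernel text; create_pte_mapping() marks
 * everything else with HW_NO_EXEC.
 */
static inline int in_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)__init_end)
                return 1;

        return 0;
}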
unsigned long pteg_count;
unsigned long mode_rw;
int i, use_largepages = 0;
+ unsigned long base = 0, size = 0;
+ extern unsigned long tce_alloc_start, tce_alloc_end;
DBG(" -> htab_initialize()\n");
* _NOT_ map it to avoid cache paradoxes as it's remapped
* non-cacheable later on
*/
- if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
+ if (cpu_has_feature(CPU_FTR_16M_PAGE))
use_largepages = 1;
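The cpu_has_feature() call replaces the open-coded cur_cpu_spec dereference on the removed line. A minimal sketch of such a wrapper, mirroring the expression it replaces (the real definition belongs in <asm/cputable.h> and may differ):

/* Assumed wrapper; mirrors the open-coded feature test it replaces. */
#define cpu_has_feature(feature) \
        ((cur_cpu_spec->cpu_features & (feature)) != 0)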
/* create the bolted linear mapping in the hash table */
for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long base, size;
-
base = lmb.memory.region[i].physbase + KERNELBASE;
size = lmb.memory.region[i].size;
#endif /* CONFIG_U3_DART */
create_pte_mapping(base, base + size, mode_rw, use_largepages);
}
+
+ /*
+ * If we have a memory_limit and we've allocated TCEs then we need to
+ * explicitly map the TCE area at the top of RAM. We also cope with the
+ * case where the TCEs start below memory_limit.
+ * tce_alloc_start/end are 16MB aligned so the mapping should work
+ * for either 4K or 16MB pages.
+ */
+ if (tce_alloc_start) {
+ tce_alloc_start += KERNELBASE;
+ tce_alloc_end += KERNELBASE;
+
+ if (base + size >= tce_alloc_start)
+ tce_alloc_start = base + size + 1;
+
+ create_pte_mapping(tce_alloc_start, tce_alloc_end,
+ mode_rw, use_largepages);
+ }
+
DBG(" <- htab_initialize()\n");
}
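A worked example of the overlap adjustment, with made-up numbers: suppose memory_limit clamps the linear mapping so the last region ends at base + size = KERNELBASE + 1GB, while firmware placed the TCE tables across 768MB..1.25GB of real memory. After the KERNELBASE offsets are applied, tce_alloc_start falls below base + size, so it is bumped just past the already-bolted region and only the unmapped 1GB..1.25GB tail is inserted; without the check, the 768MB..1GB slice would be put into the hash table twice.

/* Illustration only, hypothetical values:
 *   base + size     = KERNELBASE + 0x40000000   (linear map ends at 1GB)
 *   tce_alloc_start = KERNELBASE + 0x30000000   (768MB, already mapped)
 *   tce_alloc_end   = KERNELBASE + 0x50000000   (1.25GB)
 * The check pushes tce_alloc_start to base + size + 1, so only the
 * (1GB, 1.25GB] tail gets the extra bolted mapping.
 */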
#undef KB
{
struct page *page;
-#define PPC64_HWNOEXEC (1 << 2)
-
if (!pfn_valid(pte_pfn(pte)))
return pp;
page = pte_page(pte);

/* page is dirty */
if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
if (trap == 0x400) {
__flush_dcache_icache(page_address(page));
set_bit(PG_arch_1, &page->flags);
} else
- pp |= PPC64_HWNOEXEC;
+ pp |= HW_NO_EXEC;
}
return pp;
}
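The rename keeps the lazy icache-flush semantics: on an instruction storage fault (trap 0x400) the page is flushed and marked clean via PG_arch_1, while on any other access the returned pp bits carry the no-execute bit, so a later instruction fetch re-faults and triggers the flush then. The removed local macro presumably moved to a shared header under its new name with the value intact; an assumed definition:

/* Assumption: HW_NO_EXEC now lives in a common header (perhaps
 * <asm/mmu.h>) and keeps the old PPC64_HWNOEXEC value. */
#define HW_NO_EXEC      (1 << 2)        /* HPTE no-execute permission bit */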
int local = 0;
cpumask_t tmp;
+ if ((ea & ~REGION_MASK) > EADDR_MASK)
+ return 1;
+
switch (REGION_ID(ea)) {
case USER_REGION_ID:
user_region = 1;
mm = current->mm;
- if ((ea > USER_END) || (! mm))
+ if (! mm)
return 1;
vsid = get_vsid(mm->context.id, ea);
break;
case IO_REGION_ID:
- if (ea > IMALLOC_END)
- return 1;
mm = &ioremap_mm;
vsid = get_kernel_vsid(ea);
break;
case VMALLOC_REGION_ID:
- if (ea > VMALLOC_END)
- return 1;
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
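The single EADDR_MASK test at the top is what makes the per-region USER_END/IMALLOC_END/VMALLOC_END comparisons removable: the top nibble of a ppc64 effective address selects the region, so masking it off leaves the in-region offset, which is bounded the same way for every region. A sketch of the macros this relies on (definitions assumed from the asm headers of this era, not shown in the patch):

/* Assumed address-layout macros: the top nibble is the region ID. */
#define REGION_SHIFT    60UL
#define REGION_MASK     (0xfUL << REGION_SHIFT)
#define REGION_ID(ea)   (((unsigned long)(ea)) >> REGION_SHIFT)

/* (ea & ~REGION_MASK) isolates the in-region offset; comparing it
 * against EADDR_MASK rejects anything beyond what the MMU can map,
 * uniformly for the user, io and vmalloc regions.
 */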
unsigned long vsid, vpn, va, hash, secondary, slot;
unsigned long huge = pte_huge(pte);
- if ((ea >= USER_START) && (ea <= USER_END))
+ if (ea < KERNELBASE)
vsid = get_vsid(context, ea);
else
vsid = get_kernel_vsid(ea);
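Comparing against KERNELBASE folds the old USER_START/USER_END pair into one test: user addresses live in region 0 and every kernel region sits above KERNELBASE, so one compare picks the right VSID generator. The region values below are assumptions based on the usual ppc64 layout, not shown in this patch:

/* Assumed layout (top nibble of the effective address):
 *   0x0  USER_REGION_ID     -> get_vsid(context, ea)  per-context VSID
 *   0xC  KERNEL_REGION_ID  \
 *   0xD  VMALLOC_REGION_ID  > get_kernel_vsid(ea)     kernel VSID
 *   0xE  IO_REGION_ID      /
 */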