init_pgd = pgd_offset_k(0);
- if (!vectors_high()) {
+ if (vectors_base() == 0) {
/*
* This lock is here just to satisfy pmd_alloc and pte_lock
*/
.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
- [MT_LOW_VECTORS] = {
+ [MT_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_USER,
},
- [MT_HIGH_VECTORS] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_EXEC,
- .prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
- },
[MT_MEMORY] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
- },
- [MT_ROM] = {
- .prot_sect = PMD_TYPE_SECT,
- .domain = DOMAIN_KERNEL,
}
};
}
if (cpu_arch <= CPU_ARCH_ARMv5) {
- for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
- if (mem_types[i].prot_l1)
- mem_types[i].prot_l1 |= PMD_BIT4;
- if (mem_types[i].prot_sect)
- mem_types[i].prot_sect |= PMD_BIT4;
- }
+ mem_types[MT_DEVICE].prot_l1 |= PMD_BIT4;
+ mem_types[MT_DEVICE].prot_sect |= PMD_BIT4;
+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_BIT4;
+ mem_types[MT_MINICLEAN].prot_sect |= PMD_BIT4;
+ mem_types[MT_VECTORS].prot_l1 |= PMD_BIT4;
+ mem_types[MT_MEMORY].prot_sect |= PMD_BIT4;
}
/*
* kernel memory mapping.
*/
mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
- mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
/*
* Mark cache clean areas read only from SVC mode
* and no access from userspace.
cp = &cache_policies[cachepolicy];
if (cpu_arch >= CPU_ARCH_ARMv5) {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+ mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
} else {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
+ mem_types[MT_VECTORS].prot_pte |= cp->pte;
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
}
- mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
- mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
- mem_types[MT_ROM].prot_sect |= cp->pmd;
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
ecc_mask ? "en" : "dis", cp->policy);
}
-#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
-
/*
* Create the page directory entries and any necessary
* page tables for the mapping specified by `md'. We
pgprot_t prot_pte;
long off;
- if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
printk(KERN_WARNING "BUG: not creating mapping for "
"0x%08lx at 0x%08lx in user region\n",
md->physical, md->virtual);
return;
}
- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ if (md->type == MT_DEVICE &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
"overlaps vmalloc space\n",
}
}
-extern void _stext, _etext;
-
/*
* Setup initial mappings. We use the page we allocated for zero page to hold
* the mappings, which will get overwritten by the vectors in traps_init().
init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
-#ifdef CONFIG_XIP_KERNEL
- p->physical = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
- p->virtual = (unsigned long)&_stext & PMD_MASK;
- p->length = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
- p->type = MT_ROM;
- p ++;
-#endif
-
for (i = 0; i < mi->nr_banks; i++) {
if (mi->bank[i].size == 0)
continue;
} while (address != 0);
/*
- * Create a mapping for the machine vectors at the high-vectors
- * location (0xffff0000). If we aren't using high-vectors, also
- * create a mapping at the low-vectors virtual address.
+	 * Create a mapping for the machine vectors at either virtual
+	 * address 0 or 0xffff0000; the high mapping is preferred when
+	 * the CPU supports it.
*/
init_maps->physical = virt_to_phys(init_maps);
- init_maps->virtual = 0xffff0000;
+ init_maps->virtual = vectors_base();
init_maps->length = PAGE_SIZE;
- init_maps->type = MT_HIGH_VECTORS;
- create_mapping(init_maps);
+ init_maps->type = MT_VECTORS;
- if (!vectors_high()) {
- init_maps->virtual = 0;
- init_maps->type = MT_LOW_VECTORS;
- create_mapping(init_maps);
- }
+ create_mapping(init_maps);
flush_cache_all();
flush_tlb_all();