*
* Page table sludge for ARM v3 and v4 processor architectures.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
pte = pmd_page(*pmd);
pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pte_lock_deinit(pte);
pte_free(pte);
pmd_free(pmd);
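For readers tracing the free path above: the ordering matters. The second-level (pte) table is unhooked from the first level with pmd_clear() before its page goes back to the allocator, and the NR_PAGETABLE accounting drops with it, so no stale first-level entry ever points at freed memory. Below is a minimal user-space model of that ordering; first_level, second_level and the counter are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a second-level (pte) table page. */
struct second_level { unsigned long entries[256]; };

static long nr_page_table_pages;	/* models the NR_PAGETABLE count */

int main(void)
{
	/* Four slots standing in for the first-level (pmd) entries. */
	struct second_level *first_level[4] = { NULL };
	struct second_level *pte;

	first_level[0] = calloc(1, sizeof(struct second_level));
	nr_page_table_pages++;			/* allocation side */

	/* Free path, mirroring the order in the hunk above: */
	pte = first_level[0];			/* pmd_page()  */
	first_level[0] = NULL;			/* pmd_clear() */
	nr_page_table_pages--;			/* dec_zone_page_state() */
	free(pte);				/* pte_free()  */

	printf("page table pages outstanding: %ld\n", nr_page_table_pages);
	return 0;
}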
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
PMD_SECT_AP_WRITE,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+ .prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
[MT_MINICLEAN] = {
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
[MT_LOW_VECTORS] = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
[MT_ROM] = {
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+ .prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
PMD_SECT_TEX(1),
.domain = DOMAIN_IO,
},
[MT_NONSHARED_DEVICE] = {
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
PMD_SECT_AP_WRITE,
.domain = DOMAIN_IO,
}
ecc_mask = 0;
}
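To make the mem_types entries above concrete: create_mapping() in this file turns an entry into a raw first-level section descriptor by OR-ing the physical section base with prot_sect and the domain field. A user-space sketch of that composition, with bit positions taken from asm/pgtable-hwdef.h and an arbitrary example base address:

#include <stdio.h>

/* First-level descriptor bits, per asm/pgtable-hwdef.h. */
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_DOMAIN(x)		((unsigned long)(x) << 5)
#define PMD_SECT_AP_WRITE	(1 << 10)

#define DOMAIN_KERNEL		0
#define SECTION_SIZE		(1UL << 20)	/* ARM 1MB sections */

int main(void)
{
	/* MT_MEMORY as set up above, mapped in DOMAIN_KERNEL (0). */
	unsigned long prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	unsigned long phys = 0x10000000;	/* example base address */
	unsigned long pmdval;

	pmdval = (phys & ~(SECTION_SIZE - 1)) | prot_sect |
		 PMD_DOMAIN(DOMAIN_KERNEL);
	printf("MT_MEMORY section descriptor: %#010lx\n", pmdval);
	return 0;
}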
-	/*
-	 * Xscale must not have PMD bit 4 set for section mappings.
-	 */
-	if (cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
-			mem_types[i].prot_sect &= ~PMD_BIT4;
-
-	/*
-	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-	 * page tables.
-	 */
-	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+	/*
+	 * ARMv5 and lower, excluding Xscale, need bit 4 set in both
+	 * section and page table descriptors.  Xscale must have bit 4
+	 * clear; since PMD_BIT4 is gone from the static table above,
+	 * there is nothing left to clear for it.
+	 */
+	if (cpu_arch <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
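Bit 4 is the troublesome bit in these descriptors: architectures up to ARMv5 expect it set in section and coarse-table entries, Xscale cores require it clear, and ARMv6 reuses the same position as XN. A compact user-space model of the policy the loop above implements; the enum values mirror the kernel's CPU_ARCH_* ordering but are only illustrative here:

#include <stdio.h>

#define PMD_BIT4	(1 << 4)

/* Illustrative stand-ins for cpu_architecture() return values. */
enum { ARCH_ARMv4T = 3, ARCH_ARMv5TE = 6, ARCH_ARMv5TEJ = 7,
       ARCH_ARMv6 = 8 };

/* Apply the bit-4 rule to a descriptor (0 means "slot unused"). */
static unsigned long fixup_bit4(unsigned long prot, int cpu_arch,
				int is_xscale)
{
	if (prot && cpu_arch <= ARCH_ARMv5TEJ && !is_xscale)
		prot |= PMD_BIT4;	/* v4/v5: bit 4 must be set    */
	return prot;			/* Xscale, v6+: leave it clear */
}

int main(void)
{
	unsigned long sect = 0x2;	/* PMD_TYPE_SECT */

	printf("ARM920T (v4T):  %#lx\n", fixup_bit4(sect, ARCH_ARMv4T, 0));
	printf("Xscale (v5TE):  %#lx\n", fixup_bit4(sect, ARCH_ARMv5TE, 1));
	printf("ARM1136 (v6):   %#lx\n", fixup_bit4(sect, ARCH_ARMv6, 0));
	return 0;
}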
cp = &cache_policies[cachepolicy];
kern_pgprot = user_pgprot = cp->pte;
- /*
- * Enable CPU-specific coherency if supported.
- * (Only available on XSC3 at the moment.)
- */
- if (arch_is_coherent()) {
- if (cpu_is_xsc3()) {
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
- }
- }
-
/*
* ARMv6 and above have extended page tables.
*/
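On ARMv6 the extended format adds APX on top of the AP bits; APX together with AP_WRITE encodes "read-only in SVC mode, no user access", which is how the cache clean and XIP ROM areas end up marked read only (see the comment further down). A small sketch of that encoding, again a user-space illustration with bit positions per asm/pgtable-hwdef.h:

#include <stdio.h>

/* ARMv6 extended first-level descriptor bits. */
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_APX		(1 << 15)

int main(void)
{
	/* APX + AP_WRITE: read-only in SVC mode, no user access. */
	unsigned long rom = PMD_TYPE_SECT | PMD_SECT_APX | PMD_SECT_AP_WRITE;
	/* AP_WRITE alone: kernel read/write, no user access. */
	unsigned long mem = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

	printf("read-only ROM section: %#06lx\n", rom);
	printf("writable RAM section:  %#06lx\n", mem);
	return 0;
}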
-	 * bit 4 becomes XN which we must clear for the
-	 * kernel memory mapping.
-	 */
-	mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-	mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
/*
	 * Mark cache clean areas and XIP ROM read only
	 */

	/*
	 * ARMv6 supersections are only allocated for domain 0 regardless
	 * of the actual domain assignments in use.
	 */
- if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
- && domain == 0) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
/*
* Align to supersection boundary if !high pages.
* High pages have already been checked for proper
pgd = init_mm.pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
base_pmdval |= PMD_BIT4;
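The loop below builds the flat 1:1 section mapping used for reboot: each first-level slot receives base_pmdval OR-ed with its own section base address, two 1MB sections per pgd slot. A user-space sketch of that per-entry arithmetic, assuming the ARM values PGDIR_SHIFT = 21 and SECTION_SHIFT = 20:

#include <stdio.h>

#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_AP_READ	(1 << 11)
#define PGDIR_SHIFT		21
#define SECTION_SHIFT		20

int main(void)
{
	unsigned long base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
				    PMD_TYPE_SECT;
	int i;

	/* First three pgd slots; two 1MB section descriptors in each. */
	for (i = 0; i < 3; i++) {
		unsigned long pmdval = ((unsigned long)i << PGDIR_SHIFT) |
				       base_pmdval;

		printf("pgd[%d]: %#010lx %#010lx\n", i, pmdval,
		       pmdval + (1UL << SECTION_SHIFT));
	}
	return 0;
}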
for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {