This commit was manufactured by cvs2svn to create branch 'vserver'.
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 1b5a3f7..a9c9c33 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -15,10 +15,10 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
+#include <linux/nodemask.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
-#include <asm/rmap.h>
 #include <asm/io.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
@@ -159,7 +159,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	init_pgd = pgd_offset_k(0);
 
-	if (vectors_base() == 0) {
+	if (!vectors_high()) {
 		/*
 		 * This lock is here just to satisfy pmd_alloc and pte_lock
 		 */
@@ -231,7 +231,7 @@ void free_pgd_slow(pgd_t *pgd)
 	pte = pmd_page(*pmd);
 	pmd_clear(pmd);
-	pgtable_remove_rmap(pte);
+	dec_page_state(nr_page_table_pages);
 	pte_free(pte);
 	pmd_free(pmd);
 free:
@@ -240,7 +240,8 @@ free:
 
 /*
  * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT
+ * DOMAIN with protection PROT.  This operates on half-
+ * pgdir entry increments.
  */
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
@@ -305,27 +306,37 @@ static struct mem_types mem_types[] __initdata = {
 	[MT_DEVICE] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
 				PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_VECTORS] = {
+	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_EXEC,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.domain    = DOMAIN_USER,
+	},
+	[MT_HIGH_VECTORS] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
+	[MT_ROM] = {
+		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	}
 };
@@ -353,6 +364,15 @@ static void __init build_mem_type_table(void)
 		ecc_mask = 0;
 	}
 
+	if (cpu_arch <= CPU_ARCH_ARMv5) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+			if (mem_types[i].prot_l1)
+				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
+
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -362,6 +382,7 @@ static void __init build_mem_type_table(void)
 		 * kernel memory mapping.
 		 */
 		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
+		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
 		/*
 		 * Mark cache clean areas read only from SVC mode
 		 * and no access from userspace.
@@ -373,14 +394,18 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 
 	if (cpu_arch >= CPU_ARCH_ARMv5) {
-		mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
 	} else {
-		mem_types[MT_VECTORS].prot_pte |= cp->pte;
+		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
+		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
 	}
 
-	mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
@@ -405,6 +430,8 @@ static void __init build_mem_type_table(void)
 		ecc_mask ? "en" : "dis", cp->policy);
 }
 
+#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
+
 /*
  * Create the page directory entries and any necessary
  * page tables for the mapping specified by `md'.  We
@@ -418,14 +445,14 @@ static void __init create_mapping(struct map_desc *md)
 	pgprot_t prot_pte;
 	long off;
 
-	if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
+	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
 		       "0x%08lx at 0x%08lx in user region\n",
 		       md->physical, md->virtual);
 		return;
 	}
 
-	if (md->type == MT_DEVICE &&
+	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
 		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
 		       "overlaps vmalloc space\n",
@@ -456,6 +483,9 @@ static void __init create_mapping(struct map_desc *md)
 		length -= PAGE_SIZE;
 	}
 
+	/*
+	 * A section mapping covers half a "pgdir" entry.
+	 */
 	while (length >= (PGDIR_SIZE / 2)) {
 		alloc_init_section(virt, virt + off, prot_sect);
@@ -482,6 +512,7 @@ void setup_mm_for_reboot(char mode)
 	pgd_t *pgd;
 	pmd_t *pmd;
 	int i;
+	int cpu_arch = cpu_architecture();
 
 	if (current->mm && current->mm->pgd)
 		pgd = current->mm->pgd;
@@ -491,12 +522,17 @@ void setup_mm_for_reboot(char mode)
 	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
 		pmdval = (i << PGDIR_SHIFT) |
 			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
-			 PMD_BIT4 | PMD_TYPE_SECT;
+			 PMD_TYPE_SECT;
+		if (cpu_arch <= CPU_ARCH_ARMv5)
+			pmdval |= PMD_BIT4;
 		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
 		set_pmd(pmd, __pmd(pmdval));
+		set_pmd(pmd + 1, __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))));
 	}
 }
 
+extern void _stext, _etext;
+
 /*
  * Setup initial mappings.  We use the page we allocated for zero page to hold
  * the mappings, which will get overwritten by the vectors in traps_init().
@@ -512,6 +548,14 @@ void __init memtable_init(struct meminfo *mi)
 	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);
 
+#ifdef CONFIG_XIP_KERNEL
+	p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
+	p->virtual    = (unsigned long)&_stext & PMD_MASK;
+	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
+	p->type       = MT_ROM;
+	p ++;
+#endif
+
 	for (i = 0; i < mi->nr_banks; i++) {
 		if (mi->bank[i].size == 0)
 			continue;
@@ -559,16 +603,22 @@ void __init memtable_init(struct meminfo *mi)
 	} while (address != 0);
 
 	/*
-	 * Create a mapping for the machine vectors at virtual address 0
-	 * or 0xffff0000.  We should always try the high mapping.
+	 * Create a mapping for the machine vectors at the high-vectors
+	 * location (0xffff0000).  If we aren't using high-vectors, also
+	 * create a mapping at the low-vectors virtual address.
 	 */
 	init_maps->physical   = virt_to_phys(init_maps);
-	init_maps->virtual    = vectors_base();
+	init_maps->virtual    = 0xffff0000;
 	init_maps->length     = PAGE_SIZE;
-	init_maps->type       = MT_VECTORS;
-
+	init_maps->type       = MT_HIGH_VECTORS;
 	create_mapping(init_maps);
 
+	if (!vectors_high()) {
+		init_maps->virtual = 0;
+		init_maps->type = MT_LOW_VECTORS;
+		create_mapping(init_maps);
+	}
+
 	flush_cache_all();
 	flush_tlb_all();
 }
@@ -652,6 +702,6 @@ void __init create_memmap_holes(struct meminfo *mi)
 {
 	int node;
 
-	for (node = 0; node < numnodes; node++)
+	for_each_online_node(node)
 		free_unused_memmap_node(node, mi);
 }
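
For readers following the diff outside the kernel tree, below is a minimal standalone sketch of the PMD_BIT4 fixup the new build_mem_type_table() hunk introduces: instead of hard-coding PMD_BIT4 into every mem_types[] initialiser, the bit is ORed in once at boot on pre-ARMv6 CPUs, where level-1 descriptors require bit 4 set. The struct and constants here are simplified stand-ins, not the kernel's real definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins (assumptions, not the kernel's definitions). */
#define PMD_BIT4	(1 << 4)
#define CPU_ARCH_ARMv5	5
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct mem_type {
	unsigned long prot_l1;		/* level-1 descriptor bits */
	unsigned long prot_sect;	/* section-mapping bits */
};

static struct mem_type mem_types[] = {
	{ .prot_l1 = 0x01, .prot_sect = 0x02 },	/* e.g. MT_DEVICE */
	{ .prot_l1 = 0x00, .prot_sect = 0x02 },	/* e.g. MT_CACHECLEAN */
};

/*
 * The pattern from the diff: apply the CPU-dependent bit in one place
 * at boot, so the table itself stays architecture-neutral.
 */
static void fixup_pmd_bit4(int cpu_arch)
{
	size_t i;

	if (cpu_arch > CPU_ARCH_ARMv5)
		return;
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		if (mem_types[i].prot_l1)
			mem_types[i].prot_l1 |= PMD_BIT4;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_BIT4;
	}
}

int main(void)
{
	fixup_pmd_bit4(CPU_ARCH_ARMv5);	/* pretend we booted on ARMv5 */
	printf("prot_l1=%#lx prot_sect=%#lx\n",
	       mem_types[0].prot_l1, mem_types[0].prot_sect);
	return 0;
}

This is also why the ARMv6 branch later clears PMD_BIT4 from MT_MEMORY and MT_ROM: ARMv6 redefines that descriptor bit, so it must not leak into the kernel mapping there.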
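Likewise, the reworked tail of memtable_init() always creates the high-vectors mapping at 0xffff0000 and only adds a low-vectors mapping at virtual address 0 when high vectors are disabled. A sketch of that control flow, with a hypothetical map_vectors_page() standing in for the kernel's create_mapping()/map_desc machinery:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for create_mapping() on a map_desc. */
static void map_vectors_page(unsigned long virt, const char *type)
{
	printf("vectors page mapped at 0x%08lx (%s)\n", virt, type);
}

/*
 * Mirrors the new logic: the high-vectors mapping is unconditional,
 * the low-vectors mapping is a fallback for CPUs not using high
 * vectors.
 */
static void map_machine_vectors(bool vectors_high)
{
	map_vectors_page(0xffff0000UL, "MT_HIGH_VECTORS");
	if (!vectors_high)
		map_vectors_page(0UL, "MT_LOW_VECTORS");
}

int main(void)
{
	map_machine_vectors(false);	/* e.g. a CPU without high vectors */
	return 0;
}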