/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{ .policy = "uncached",     .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = 0 },
	{ .policy = "buffered",     .cr_mask = CR_C,      .pmd = PMD_SECT_BUFFERED, .pte = PTE_BUFFERABLE },
	{ .policy = "writethrough", .cr_mask = 0,         .pmd = PMD_SECT_WT,       .pte = PTE_CACHEABLE },
	{ .policy = "writeback",    .cr_mask = 0,         .pmd = PMD_SECT_WB,       .pte = PTE_BUFFERABLE|PTE_CACHEABLE },
	{ .policy = "writealloc",   .cr_mask = 0,         .pmd = PMD_SECT_WBWA,     .pte = PTE_BUFFERABLE|PTE_CACHEABLE },
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
}
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
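
/*
 * Illustrative example (not from the original source): with the early
 * parameters registered above, booting with a command line such as
 *
 *	cachepolicy=writethrough ecc=on
 *
 * selects the write-through entry of cache_policies[] and sets ecc_mask
 * to PMD_PROTECTION before the page tables are built.
 */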
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (vectors_base() == 0) {
		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_offset(pgd, 0);
	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
		pte = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		pte_free(pte);
		pmd_free(pmd);
	}
	free_pages((unsigned long) pgd, 2);
}
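
/*
 * Note (added for clarity): these slow-path routines are normally not
 * called directly; the per-mm page directory is allocated and freed
 * through the pgd_alloc()/pgd_free() wrappers in <asm/pgalloc.h>, which
 * on ARM typically map straight onto get_pgd_slow()/free_pgd_slow().
 */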
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (virt & (1 << 20))
		pmdp++;

	set_pmd(pmdp, __pmd(phys | prot));
}
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		unsigned long pmdval;
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		pmdval = __pa(ptep) | prot_l1;
		pmdp[0] = __pmd(pmdval);
		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
		flush_pmd_entry(pmdp);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
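
/*
 * Note (added for clarity): a Linux "pgd" entry on ARM covers 2MB and is
 * backed by a pair of 1MB hardware entries.  That is why alloc_init_page()
 * fills both pmdp[0] and pmdp[1] (the second hardware table starts 256
 * entries further on) and why alloc_init_section() steps to the second
 * entry of the pair when bit 20 of the virtual address is set.
 */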
/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (cpu_arch <= CPU_ARCH_ARMv5) {
		mem_types[MT_DEVICE].prot_l1 |= PMD_BIT4;
		mem_types[MT_DEVICE].prot_sect |= PMD_BIT4;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_BIT4;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_BIT4;
		mem_types[MT_VECTORS].prot_l1 |= PMD_BIT4;
		mem_types[MT_MEMORY].prot_sect |= PMD_BIT4;
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		/*
		 * Mark cache clean areas read only from SVC mode
		 * and no access from userspace.
		 */
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
	}

	cp = &cache_policies[cachepolicy];

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
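
/*
 * Example of the resulting boot message (assuming the default policy and
 * no ECC): "Memory policy: ECC disabled, Data cache writeback".
 */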
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	long off;

	if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}

	if (md->type == MT_DEVICE &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       md->physical, md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	virt   = md->virtual;
	off    = md->physical - virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       md->physical, md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
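
/*
 * Worked example (illustrative): PGDIR_SIZE is 2MB on ARM, so a section
 * here is PGDIR_SIZE/2 = 1MB.  A request for a 1MB-aligned region of
 * 0x101000 bytes therefore skips the leading page loop, maps one 1MB
 * section, and finishes with a single 4KB page for the remainder.
 */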
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	pmd_t *pmd;
	int i;
	int cpu_arch = cpu_architecture();

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
		pmdval = (i << PGDIR_SHIFT) |
			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			 PMD_TYPE_SECT;
		if (cpu_arch <= CPU_ARCH_ARMv5)
			pmdval |= PMD_BIT4;
		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
	}
}
/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;

	build_mem_type_table();

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical   = mi->bank[i].start;
		p->virtual    = __phys_to_virt(p->physical);
		p->length     = mi->bank[i].size;
		p->type       = MT_MEMORY;
		p++;
	}

#ifdef FLUSH_BASE
	p->physical   = FLUSH_BASE_PHYS;
	p->virtual    = FLUSH_BASE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_CACHECLEAN;
	p++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual    = FLUSH_BASE_MINICACHE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_MINICLEAN;
	p++;
#endif

	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q++;
		}
	} while (address != 0);

	/*
	 * Create a mapping for the machine vectors at virtual address 0
	 * or 0xffff0000.  We should always try the high mapping.
	 */
	init_maps->physical   = virt_to_phys(init_maps);
	init_maps->virtual    = vectors_base();
	init_maps->length     = PAGE_SIZE;
	init_maps->type       = MT_VECTORS;

	create_mapping(init_maps);

	flush_cache_all();
	flush_tlb_all();
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
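
/*
 * Illustrative sketch (not part of this file): machine support code
 * normally describes its static I/O mappings with an array of map_desc
 * entries and hands them to iotable_init() from its map_io callback.
 * The names and addresses below are hypothetical.
 */
#if 0
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,	/* hypothetical VA */
		.physical	= 0x80000000,	/* hypothetical PA */
		.length		= 0x00100000,	/* 1MB */
		.type		= MT_DEVICE
	}
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif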
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn);
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
			continue;

		bank_start = mi->bank[i].start >> PAGE_SHIFT;
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
					   mi->bank[i].size) >> PAGE_SHIFT;
	}
}
/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
	int node;

	for (node = 0; node < numnodes; node++)
		free_unused_memmap_node(node, mi);
}