/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>
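/*
 * Cache policy selectors.  These index the cache_policies[] table
 * below; the default is writeback, and the selection can be
 * overridden with the "cachepolicy=" early parameter.
 */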
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};
static struct cachepolicy cache_policies[] __initdata = {
	{ .policy = "uncached",
	  .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = 0 },
	{ .policy = "buffered",
	  .cr_mask = CR_C, .pmd = PMD_SECT_BUFFERED, .pte = PTE_BUFFERABLE },
	{ .policy = "writethrough",
	  .cr_mask = 0, .pmd = PMD_SECT_WT, .pte = PTE_CACHEABLE },
	{ .policy = "writeback",
	  .cr_mask = 0, .pmd = PMD_SECT_WB, .pte = PTE_BUFFERABLE|PTE_CACHEABLE },
	{ .policy = "writealloc",
	  .cr_mask = 0, .pmd = PMD_SECT_WBWA, .pte = PTE_BUFFERABLE|PTE_CACHEABLE },
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);
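/*
 * Example kernel command line usage of the parameters registered
 * above: "cachepolicy=writethrough", "ecc=on", "noalign", plus the
 * deprecated "nocache" and "nowb" aliases.
 */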
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (vectors_base() == 0) {
		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
		pte = pmd_page(*pmd);
		pmd_clear(pmd);
		pgtable_remove_rmap(pte);
		pte_free(pte);
		pmd_free(pmd);
	}

	free_pages((unsigned long) pgd, 2);
}
/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
	if (virt & (1 << 20))
		pmdp++;

	set_pmd(pmdp, __pmd(phys | prot));
}
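/*
 * Note: a Linux pgd entry on ARM covers a pair of 1MB hardware
 * sections (PGDIR_SIZE is 2MB), which is why the (1 << 20) test above
 * selects the second entry of the pair and why create_mapping() below
 * advances in PGDIR_SIZE/2 steps when laying down section mappings.
 */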
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		unsigned long pmdval;
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		pmdval = __pa(ptep) | prot_l1;
		pmdp[0] = __pmd(pmdval);
		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
		flush_pmd_entry(pmdp);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}
/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}
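/*
 * Per-mapping-type protection bits: the Linux PTE bits (prot_pte),
 * the L1 page table and section descriptor bits (prot_l1, prot_sect)
 * and the ARM domain, fixed up for the running CPU by
 * build_mem_type_table() below.
 */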
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};
static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	}
};
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	int cpu_arch = cpu_architecture();
	int i;
#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas read only from SVC mode
		 * and no access from userspace.
		 */
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
	}
	cp = &cache_policies[cachepolicy];

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);
	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}
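/*
 * The printk above produces a one-line boot-time summary, for example
 * with ECC left off and the default policy:
 *
 *	Memory policy: ECC disabled, Data cache writeback
 */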
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	long off;
	if (md->virtual != vectors_base() && md->virtual < PAGE_OFFSET) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}
	if (md->type == MT_DEVICE &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       md->physical, md->virtual);
	}
	domain    = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	virt   = md->virtual;
	off    = md->physical - virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       md->physical, md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	pmd_t *pmd;
	int i;
	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;
	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
		pmdval = (i << PGDIR_SHIFT) |
			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			 PMD_BIT4 | PMD_TYPE_SECT;
		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
	}
}
/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;
	build_mem_type_table();

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical   = mi->bank[i].start;
		p->virtual    = __phys_to_virt(p->physical);
		p->length     = mi->bank[i].size;
		p->type       = MT_MEMORY;
		p ++;
	}
#ifdef FLUSH_BASE
	p->physical   = FLUSH_BASE_PHYS;
	p->virtual    = FLUSH_BASE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_CACHECLEAN;
	p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual    = FLUSH_BASE_MINICACHE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_MINICLEAN;
	p ++;
#endif
	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q ++;
		}
	} while (address != 0);
	/*
	 * Create a mapping for the machine vectors at virtual address 0
	 * or 0xffff0000.  We should always try the high mapping.
	 */
	init_maps->physical   = virt_to_phys(init_maps);
	init_maps->virtual    = vectors_base();
	init_maps->length     = PAGE_SIZE;
	init_maps->type       = MT_VECTORS;

	create_mapping(init_maps);

	flush_cache_all();
	flush_tlb_all();
}
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
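/*
 * Typical (hypothetical) use from a machine's ->map_io callback,
 * assuming a static table of map_desc entries in virtual, physical,
 * length, type order:
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{ 0xf8000000, 0x40000000, SZ_1M, MT_DEVICE }
 *	};
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */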
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn);
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
			continue;

		bank_start = mi->bank[i].start >> PAGE_SHIFT;
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
					   mi->bank[i].size) >> PAGE_SHIFT;
	}
}
/*
 * The mem_map array can get very big.  Free
 * the unused areas of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
	int node;

	for (node = 0; node < numnodes; node++)
		free_unused_memmap_node(node, mi);
}