/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
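
/*
 * Module-scope MM state for the PPC64 port.  ioremap() mappings are
 * handed out upwards from ioremap_bot (IMALLOC_BASE); per-PHB IO space
 * is carved out from PHBS_IO_BASE by reserve_phb_iospace() below and
 * must stay below IMALLOC_BASE.
 */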
struct mmu_context_queue_t mmu_context_queue;

unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

extern pgd_t ioremap_dir[];
pgd_t *ioremap_pgd = (pgd_t *)&ioremap_dir;

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1 = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
unsigned long top_of_ram;
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
#ifdef CONFIG_PPC_ISERIES

void *ioremap(unsigned long addr, unsigned long size)

extern void *__ioremap(unsigned long addr, unsigned long size,

void iounmap(void *addr)
/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static void map_io_page(unsigned long ea, unsigned long pa, int flags)

	spin_lock(&ioremap_mm.page_table_lock);
	pgdp = pgd_offset_i(ea);
	pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
	ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);

	pa = abs_to_phys(pa);
	set_pte(ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
	spin_unlock(&ioremap_mm.page_table_lock);

	unsigned long va, vpn, hash, hpteg;
	/*
	 * If the mm subsystem is not fully up, we cannot create a
	 * linux page table entry for this mapping.  Simply bolt an
	 * entry in the hardware page table.
	 */
	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0xFFFFFFF);
	vpn = va >> PAGE_SHIFT;

	hash = hpt_hash(vpn, 0);

	hpteg = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP);
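
	/*
	 * hpteg now indexes the first slot of the primary HPTE group for
	 * this virtual page: the hash selects a group, and each group
	 * holds HPTES_PER_GROUP entries.
	 */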
	/* Panic if a pte group is full */
	if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
			       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
		panic("map_io_page: could not insert mapping");
static void * __ioremap_com(unsigned long addr, unsigned long pa,
			    unsigned long ea, unsigned long size,

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
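
	/*
	 * Non-cacheable or write-through mappings target IO space, so the
	 * guarded bit is also set to keep the CPU from speculatively
	 * accessing them.
	 */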
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	for (i = 0; i < size; i += PAGE_SIZE) {
		map_io_page(ea + i, pa + i, flags);

	return (void *) (ea + (addr & ~PAGE_MASK));
void *
ioremap(unsigned long addr, unsigned long size)
{
	void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
	return eeh_ioremap(addr, ret);	/* may remap the addr */
}
__ioremap(unsigned long addr, unsigned long size, unsigned long flags)

	unsigned long pa, ea;
	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;
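
	/*
	 * pa is addr rounded down to a page boundary and size is rounded up
	 * so that whole pages are mapped; e.g. addr = 0x10000004, size = 0x10
	 * gives pa = 0x10000000 and size = 0x1000, and __ioremap_com() adds
	 * the page offset (0x4) back into the returned address.
	 */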
	struct vm_struct *area;
	area = im_get_free_area(size);
	ea = (unsigned long)(area->addr);

	return __ioremap_com(addr, pa, ea, size, flags);
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)

	struct vm_struct *area;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    no imalloc record will exist for it)
		 */
	area = im_get_area(ea, size, IM_REGION_UNUSED|IM_REGION_SUBSET);

		printk(KERN_ERR "could not obtain imalloc area for ea 0x%lx\n", ea);

	if (ea != (unsigned long) area->addr) {
		printk(KERN_ERR "unexpected addr return from im_get_area\n");

	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");

static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,

	pte = pte_offset_kernel(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;

		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;

		if (pte_present(page))

		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);

static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,

	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)

		unmap_im_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
	} while (address < end);
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(void *addr)

	unsigned long address, start, end, size;
	struct mm_struct *mm;

	if (!mem_init_done) {
	/* addr could be in EEH or IO region, map it to IO region regardless. */
	addr = (void *) (IO_TOKEN_TO_ADDR(addr) & PAGE_MASK);

	if ((size = im_free(addr)) == 0) {

	address = (unsigned long)addr;

	end = address + size;

	spin_lock(&mm->page_table_lock);

	dir = pgd_offset_i(address);
	flush_cache_vunmap(address, end);

		unmap_im_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
	} while (address && (address < end));
	flush_tlb_kernel_range(start, end);

	spin_unlock(&mm->page_table_lock);
int iounmap_explicit(void *addr, unsigned long size)

	struct vm_struct *area;

	/* addr could be in EEH or IO region, map it to IO region regardless. */
	addr = (void *) (IO_TOKEN_TO_ADDR(addr) & PAGE_MASK);
	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region.
	 */
	area = im_get_area((unsigned long) addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);

		printk(KERN_ERR "%s() cannot unmap nonexistent range 0x%lx\n",
		       __FUNCTION__, (unsigned long) addr);
void free_initmem(void)

	addr = (unsigned long)__init_begin;
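	/*
	 * The pages covering the init sections were marked reserved at boot;
	 * clear the reserved flag and reset the reference count so each page
	 * can be handed back to the page allocator.
	 */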
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);

	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)

		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)

	ppc64_boot_msg(0x100, "MM Init");
	/* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
	 * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
	 * is stored on a stack/queue for easy allocation and deallocation.
	 */
	mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
	mmu_context_queue.head = 0;
	mmu_context_queue.tail = NUM_USER_CONTEXT - 1;
	mmu_context_queue.size = NUM_USER_CONTEXT;
	for (i = 0; i < NUM_USER_CONTEXT; i++)
		mmu_context_queue.elements[i] = i + FIRST_USER_CONTEXT;
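
	/*
	 * After this loop the queue holds the contexts FIRST_USER_CONTEXT
	 * through FIRST_USER_CONTEXT + NUM_USER_CONTEXT - 1, with head at
	 * element 0 and tail at element NUM_USER_CONTEXT - 1.
	 */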
	/* This is the story of the IO hole... please, keep seated,
	 * unfortunately, we are out of oxygen masks at the moment.
	 * So we need some rough way to tell where your big IO hole
	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
	 * that area as well, on POWER4 we don't have one, etc...
	 * We need that to implement something approx. decent for
	 * page_is_ram() so that /dev/mem doesn't map cacheable IO space
	 * when XFree requests some IO regions without using O_SYNC, we
	 * also need that as a "hint" when sizing the TCE table on POWER3
	 * So far, the simplest way that seems to work well enough for us is
	 * to just assume that the first discontinuity in our physical
	 * RAM layout is the IO hole. That may not be correct in the future
	 * (and isn't on iSeries but then we don't care ;)
	 */
	top_of_ram = lmb_end_of_DRAM();

#ifndef CONFIG_PPC_ISERIES
	for (i = 1; i < lmb.memory.cnt; i++) {
		unsigned long base, prevbase, prevsize;

		prevbase = lmb.memory.region[i-1].physbase;
		prevsize = lmb.memory.region[i-1].size;
		base = lmb.memory.region[i].physbase;
		if (base > (prevbase + prevsize)) {
			io_hole_start = prevbase + prevsize;
			io_hole_size = base - (prevbase + prevsize);
#endif /* CONFIG_PPC_ISERIES */

	printk("IO Hole assumed to be %lx -> %lx\n",
	       io_hole_start, io_hole_start + io_hole_size - 1);

	ppc64_boot_msg(0x100, "MM Init Done");
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long physaddr)
#ifdef CONFIG_PPC_ISERIES

	if (physaddr >= top_of_ram)

	return io_hole_start == 0 || physaddr < io_hole_start ||
		physaddr >= (io_hole_start + io_hole_size);
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_DISCONTIGMEM
void __init do_init_bootmem(void)
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = abs_to_phys(lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE));

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
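
	/*
	 * init_bootmem() starts with every page marked reserved; the loop
	 * below releases the real RAM regions, and the lmb reserved regions
	 * (which include the bitmap allocated above) are then marked
	 * reserved again.
	 */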
	/* Add all physical memory to the bootmem map. */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);
	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;

		reserve_bootmem(physbase, size);
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
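
	/*
	 * zholes_size accounts for the IO hole: the zone spans up to
	 * top_of_ram, but only total_ram pages of it are real memory.
	 */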
	free_area_init_node(0, &contig_page_data, NULL, zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
	mem_map = contig_page_data.node_mem_map;
#endif /* CONFIG_DISCONTIGMEM */

static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;
		struct kcore_list *kcore_mem;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("setup_kcore: kmalloc failed\n");

		kclist_add(kcore_mem, __va(physbase), size);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

module_init(setup_kcore);

void __init mem_init(void)
#ifndef CONFIG_DISCONTIGMEM

	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	max_pfn = max_low_pfn;

#ifdef CONFIG_DISCONTIGMEM
	for (nid = 0; nid < numnodes; nid++) {
		if (node_data[nid].node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			free_all_bootmem_node(NODE_DATA(nid));

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));

	max_mapnr = num_physpages;
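
	/*
	 * free_all_bootmem() retires the bootmem allocator, handing every
	 * page still marked free over to the buddy allocator and returning
	 * the number of pages released.
	 */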
	totalram_pages += free_all_bootmem();

	for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());

		if (!PageReserved(virt_to_page(addr)))

		if (addr < (unsigned long)_etext)

		else if (addr >= (unsigned long)__init_begin
			 && addr < (unsigned long)__init_end)

		else if (addr < klimit)

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10), datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));

#ifdef CONFIG_PPC_ISERIES
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
	if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)

	if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,

	copy_page(vto, vfrom);
	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif
	if (cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE)

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
	/* handle i-cache coherency */
	if (!(cur_cpu_spec->cpu_features & CPU_FTR_COHERENT_ICACHE) &&
	    !(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */

	pgdir = vma->vm_mm->pgd;

	ptep = find_linux_pte(pgdir, ea);

	vsid = get_vsid(vma->vm_mm->context.id, ea);
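
	/*
	 * If this mm has only ever been used on the current cpu, the hash
	 * update can invalidate any stale entry locally instead of
	 * broadcasting a tlbie to every processor.
	 */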
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))

	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,

void * reserve_phb_iospace(unsigned long size)

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void *) phbs_io_bot;
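
/*
 * zero_cache supplies cache-aligned, page-sized objects for the page
 * table code; zero_ctor() zeroes each object when its backing slab is
 * created, so freshly constructed objects start out zero-filled.
 */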
kmem_cache_t *zero_cache;

static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
	memset(pte, 0, PAGE_SIZE);

void pgtable_cache_init(void)

	zero_cache = kmem_cache_create("zero",
				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,

		panic("pgtable_cache_init(): could not create zero_cache!\n");