/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/imalloc.h>
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

extern pgd_t ioremap_dir[];
pgd_t *ioremap_pgd = (pgd_t *)&ioremap_dir;

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1 = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;

	printk("Mem-info:\n");
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			if (PageReserved(page))
			else if (PageSwapCache(page))
			else if (page_count(page))
				shared += page_count(page) - 1;

	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
EXPORT_SYMBOL_GPL(show_mem);
#ifdef CONFIG_PPC_ISERIES
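
/*
 * On iSeries, ioremap() and friends are effectively identity operations:
 * no kernel page-table or hash-table entries are set up here, the caller
 * simply gets the address back as an __iomem cookie (I/O accesses are
 * presumably handled through hypervisor calls rather than an MMU mapping
 * created by this code).
 */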
void __iomem *ioremap(unsigned long addr, unsigned long size)
	return (void __iomem *)addr;

extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
	return (void __iomem *)addr;

void iounmap(volatile void __iomem *addr)
static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,

	pte = pte_offset_kernel(pmd, addr);
		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);

static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,

	pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
		unmap_im_area_pte(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,

	pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
		unmap_im_area_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
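
/*
 * The helpers above clear one page-table level each; unmap_im_area()
 * below drives the pgd -> pud -> pmd -> pte walk for an imalloc/ioremap
 * range and then flushes the TLB, mirroring unmap_vm_area() in
 * mm/vmalloc.c (see the iounmap() comment further down).
 */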
static void unmap_im_area(unsigned long addr, unsigned long end)
	struct mm_struct *mm = &ioremap_mm;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset_i(addr);
	flush_cache_vunmap(addr, end);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
		unmap_im_area_pud(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);

	spin_unlock(&mm->page_table_lock);
/*
 * map_io_page is currently only called by __ioremap.
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it.
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
		spin_lock(&ioremap_mm.page_table_lock);
		pgdp = pgd_offset_i(ea);
		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
		pa = abs_to_phys(pa);
		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
		spin_unlock(&ioremap_mm.page_table_lock);
		unsigned long va, vpn, hash, hpteg;

		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		vsid = get_kernel_vsid(ea);
		va = (vsid << 28) | (ea & 0xFFFFFFF);
		vpn = va >> PAGE_SHIFT;

		hash = hpt_hash(vpn, 0);

		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
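
		/*
		 * The kernel VSID supplies the high bits of the virtual
		 * address and the low 28 bits (a 256 MB segment offset)
		 * come straight from ea.  Hashing the virtual page number
		 * selects the hash-table PTE group (HPTES_PER_GROUP slots)
		 * that ppc_md.hpte_insert() fills in below.
		 */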
		/* Panic if a PTE group is full */
		if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
				       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
			panic("map_io_page: could not insert mapping");
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
			    unsigned long ea, unsigned long size,

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_io_page(ea+i, pa+i, flags))

	return (void __iomem *) (ea + (addr & ~PAGE_MASK));

	unmap_im_area(ea, ea + size);
ioremap(unsigned long addr, unsigned long size)
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);

void __iomem * __ioremap(unsigned long addr, unsigned long size,
	unsigned long pa, ea;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END (0xE000001fffffffff)
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;
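
	/*
	 * Illustrative example (4 KB pages, made-up address): a request for
	 * 0x100 bytes at addr 0x80001004 gives pa = 0x80001000 and
	 * size = PAGE_ALIGN(0x80001104) - 0x80001000 = 0x1000, i.e. the one
	 * page containing the requested bytes; the in-page offset is added
	 * back onto the cookie returned by __ioremap_com().
	 */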
		struct vm_struct *area;
		area = im_get_free_area(size);
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);

		ret = __ioremap_com(addr, pa, ea, size, flags);

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
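
/*
 * __ioremap_explicit() maps a region at an effective address chosen by
 * the caller rather than by imalloc.  Judging by the PHB-dlpar comment
 * below, this is for dynamically added PCI host bridges whose I/O space
 * must reappear at a known address.
 */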
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
	struct vm_struct *area;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of the above)
		 */

		area = im_get_area(ea, size,
				   IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		/* Expected when PHB-dlpar is in play */
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "

	ret = __ioremap_com(pa, pa, ea, size, flags);
		printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX what about calls before mem_init_done (i.e. python_countermeasures())?
 */
void iounmap(volatile void __iomem *token)
	unsigned long address, size;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	if ((size = im_free(addr)) == 0)

	address = (unsigned long)addr;
	unmap_im_area(address, address + size);
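
/*
 * iounmap_subset_regions() handles an unmap request that is not itself a
 * single imalloc region but covers one or more smaller ones: each subset
 * region reported by im_get_area(IM_REGION_SUPERSET) is iounmap()ed in
 * turn.
 */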
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,

int iounmap_explicit(volatile void __iomem *start, unsigned long size)
	struct vm_struct *area;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region being unmapped. */
	area = im_get_area(addr, size,
			   IM_REGION_EXISTS | IM_REGION_SUBSET);
		/* Determine whether subset regions exist.  If so, unmap */
		rc = iounmap_subset_regions(addr, size);
			"%s() cannot unmap nonexistent range 0x%lx\n",

	iounmap((void __iomem *) area->addr);
	/*
	 * FIXME! This can't be right:
	 * Maybe it should be "iounmap(area);"
	 */
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

void free_initmem(void)
	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
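
/*
 * MMU context ids are handed out through an IDR so that they can be
 * recycled in destroy_context().  Allocation starts at 1 (see
 * idr_get_new_above() below), so id 0 is never handed to a user mm, and
 * any id above MAX_CONTEXT is returned to the IDR straight away.
 */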
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#ifdef CONFIG_HUGETLB_PAGE
	/* We leave htlb_segs as it was, but for a fork, we need to
	 * clear the huge_pgdir. */
	mm->context.huge_pgdir = NULL;

	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (index > MAX_CONTEXT) {
		idr_remove(&mmu_context_idr, index);

	mm->context.id = index;

void destroy_context(struct mm_struct *mm)
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, mm->context.id);
	spin_unlock(&mmu_context_lock);

	mm->context.id = NO_CONTEXT;

	hugetlb_mm_free_pgd(mm);
/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
#ifndef CONFIG_PPC_ISERIES

	ppc64_boot_msg(0x100, "MM Init");

	/* This is the story of the IO hole... please, keep seated,
	 * unfortunately, we are out of oxygen masks at the moment.
	 * So we need some rough way to tell where your big IO hole
	 * is.  On pmac, it's between 2G and 4G, on POWER3, it's around
	 * that area as well, on POWER4 we don't have one, etc...
	 * We need that as a "hint" when sizing the TCE table on POWER3.
	 * So far, the simplest way that seems to work well enough for
	 * us is to just assume that the first discontinuity in our
	 * physical RAM layout is the IO hole.  That may not be correct
	 * in the future (and isn't on iSeries, but then we don't care ;)
	 */
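
	/*
	 * Illustrative example (made-up layout): with lmb memory regions
	 * covering [0, 2G) and [4G, 6G), the loop below ends up with
	 * io_hole_start = 2G and io_hole_size = 2G, i.e. the gap between
	 * the first two regions.
	 */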
#ifndef CONFIG_PPC_ISERIES
	for (i = 1; i < lmb.memory.cnt; i++) {
		unsigned long base, prevbase, prevsize;

		prevbase = lmb.memory.region[i-1].physbase;
		prevsize = lmb.memory.region[i-1].size;
		base = lmb.memory.region[i].physbase;
		if (base > (prevbase + prevsize)) {
			io_hole_start = prevbase + prevsize;
			io_hole_size = base - (prevbase + prevsize);
#endif /* CONFIG_PPC_ISERIES */

	printk("IO Hole assumed to be %lx -> %lx\n",
	       io_hole_start, io_hole_start + io_hole_size - 1);

	ppc64_boot_msg(0x100, "MM Init Done");
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
	unsigned long paddr = (pfn << PAGE_SHIFT);

	for (i=0; i < lmb.memory.cnt; i++) {
#ifdef CONFIG_MSCHUNKS
		base = lmb.memory.region[i].physbase;
#else
		base = lmb.memory.region[i].base;
#endif
		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {

EXPORT_SYMBOL(page_is_ram);
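
/*
 * next_ram_page() appears to be the scanning companion to page_is_ram():
 * it looks up the lmb map to find the next pfn that is backed by RAM,
 * falling back to the start of the nearest region above the given
 * address, so callers can skip over memory holes.
 */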
unsigned long next_ram_page(unsigned long pfn)
	unsigned long paddr, base;
	unsigned long best_base = (ULONG_MAX << PAGE_SHIFT);

	paddr = (pfn << PAGE_SHIFT);

	for (i=0; i < lmb.memory.cnt; i++) {
#ifdef CONFIG_MSCHUNKS
		base = lmb.memory.region[i].physbase;
#else
		base = lmb.memory.region[i].base;
#endif
		    && (paddr < (base + lmb.memory.region[i].size)))
			return (paddr >> PAGE_SHIFT);
		if ((paddr < base) && (base < best_base))

	if (best_base < (ULONG_MAX << PAGE_SHIFT))
		return (best_base >> PAGE_SHIFT);

EXPORT_SYMBOL_GPL(next_ram_page);
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_DISCONTIGMEM
void __init do_init_bootmem(void)
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
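
	/*
	 * Rough illustrative numbers (4 KB pages): 2 GB of RAM is 512K
	 * pages, so the bitmap needs 512K / 8 = 64 KB, i.e. 16 pages,
	 * plus the one extra page allowed for above in case of
	 * misalignment.
	 */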
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	max_pfn = max_low_pfn;

	/* add all physical memory to the bootmem map */
	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;
		free_bootmem(physbase, size);

	/* reserve the sections we're already using */
	for (i=0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].physbase;
		unsigned long size = lmb.reserved.region[i].size;

		reserve_bootmem(physbase, size);
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
#endif /* CONFIG_DISCONTIGMEM */

static struct kcore_list kcore_vmem;
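
/*
 * setup_kcore() registers each block of physical RAM, plus the vmalloc
 * area, with /proc/kcore via kclist_add() so kernel memory can be
 * examined through that interface; it is run as an initcall via
 * module_init() below.
 */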
static int __init setup_kcore(void)
	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long physbase, size;
		struct kcore_list *kcore_mem;

		physbase = lmb.memory.region[i].physbase;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
			panic("setup_kcore: kmalloc failed\n");

		kclist_add(kcore_mem, __va(physbase), size);

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

module_init(setup_kcore);
void __init mem_init(void)
#ifdef CONFIG_DISCONTIGMEM
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_DISCONTIGMEM
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			free_all_bootmem_node(NODE_DATA(nid));

	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();

#ifdef CONFIG_PPC_PSERIES
	/* Mark the RTAS pages as PG_reserved so userspace can mmap them */
		unsigned long pfn, start_pfn, end_pfn;

		start_pfn = rtas_rmo_buf >> PAGE_SHIFT;
		end_pfn = (rtas_rmo_buf + RTAS_RMOBUF_MAX) >> PAGE_SHIFT;
		for (pfn = start_pfn; pfn < end_pfn; pfn++)
			SetPageReserved(pfn_to_page(pfn));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat->node_mem_map + i;
			if (PageReserved(page))

	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       reservedpages << (PAGE_SHIFT-10),

#ifdef CONFIG_PPC_ISERIES
	/* Initialize the vDSO */

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
EXPORT_SYMBOL(flush_dcache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are i-cache clean)
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 */
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */

	pgdir = vma->vm_mm->pgd;

	ptep = find_linux_pte(pgdir, ea);

	vsid = get_vsid(vma->vm_mm->context.id, ea);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
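		/*
		 * When this mm has only ever run on the current CPU (its
		 * cpu_vm_mask equals just this CPU), the hash insert below
		 * can presumably use a local, non-broadcast TLB
		 * invalidation; that is what this cpumask comparison is
		 * checking for.
		 */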
	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
	local_irq_restore(flags);
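
/*
 * reserve_phb_iospace() carves PHB I/O space out of the fixed virtual
 * window between PHBS_IO_BASE and IMALLOC_BASE that phbs_io_bot tracks,
 * panicking if the window would overflow.
 */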
void __iomem * reserve_phb_iospace(unsigned long size)
	void __iomem *virt_addr;

	if (phbs_io_bot >= IMALLOC_BASE)
		panic("reserve_phb_iospace(): phb io space overflow\n");

	virt_addr = (void __iomem *) phbs_io_bot;

kmem_cache_t *zero_cache;
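
/*
 * zero_cache supplies zeroed, hardware-cache-aligned PAGE_SIZE objects;
 * the page-table allocation helpers apparently draw pte/pmd pages from
 * it, which is why zero_ctor() below pre-zeroes every object.
 */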
static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
	memset(pte, 0, PAGE_SIZE);

void pgtable_cache_init(void)
	zero_cache = kmem_cache_create("zero",
				       SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
		panic("pgtable_cache_init(): could not create zero_cache!\n");
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
EXPORT_SYMBOL(phys_mem_access_prot);