/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

extern void hash_preload(struct mm_struct *mm, unsigned long ea,
			 unsigned long access, unsigned long trap);

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);

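/*
 * Worked example (illustrative numbers, not taken from the code above):
 * on a 32-bit system with 512MB of RAM mapped as lowmem, pfn 0x1ffff
 * (just below 512MB) satisfies paddr < __pa(high_memory) and is RAM,
 * while pfn 0x80000 (2GB, a typical PCI window) is not, so a /dev/mem
 * mapping of the latter is made guarded and non-cacheable below.
 */
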
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

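/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * mmap() method would apply the returned protection before remapping,
 * in the same pattern /dev/mem uses:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
 *						 vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       size, vma->vm_page_prot);
 */
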
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

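/*
 * Worked example (illustrative values only): hot-adding 256MB at
 * physical address 0x10000000 with 4KB pages gives
 * start_pfn = 0x10000000 >> 12 = 0x10000 and nr_pages = 0x10000,
 * i.e. 65536 pages handed to __add_pages() in one call.
 */
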
/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO. Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
			"%lx to %lx\n", __func__, start, start+size);

	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
			"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available. If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap. Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

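	/*
	 * Worked example of the sizing rule above (illustrative numbers):
	 * with 512MB of lowmem and 4KB pages, total_pages = 131072;
	 * one bit per page needs 131072 / 8 = 16KB of bitmap, which
	 * bootmem_bootmap_pages() rounds up to four 4KB pages.
	 */
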
	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

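/*
 * Worked example of the zone sizing in paging_init() (illustrative
 * numbers): on a 32-bit highmem machine with 768MB of RAM and 512MB
 * of lowmem, ZONE_DMA spans pfns [0, 0x20000) and ZONE_HIGHMEM
 * extends up to pfn 0x30000 (512MB = 0x20000000 >> 12,
 * 768MB = 0x30000000 >> 12).
 */
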
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif

#ifdef CONFIG_PPC_PSERIES
	/* Mark the RTAS pages as PG_reserved so userspace can mmap them */
	if (machine_is(pseries)) {
		unsigned long pfn, start_pfn, end_pfn;

		start_pfn = rtas_rmo_buf >> PAGE_SHIFT;
		end_pfn = (rtas_rmo_buf + RTAS_RMOBUF_MAX) >> PAGE_SHIFT;
		for (pfn = start_pfn; pfn < end_pfn; pfn++)
			SetPageReserved(pfn_to_page(pfn));
	}
#endif

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

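	/*
	 * Illustrative arithmetic (made-up addresses): if _stext is at
	 * 0xc0000000 and _sdata at 0xc0400000, codesize is 0x400000
	 * bytes and is reported in the summary printk below as
	 * "4096k kernel code".
	 */
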
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

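/*
 * The deferred-flush protocol in brief (a summary of the code in this
 * file, not new behaviour): PG_arch_1 set means "i-cache clean for
 * this page". flush_dcache_page() clears the bit when the kernel
 * dirties a page; update_mmu_cache() below flushes and sets the bit
 * the first time the page is mapped where it might be executed.
 */
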
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

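/*
 * Typical caller (context, not part of this file): copy_to_user_page(),
 * used by access_process_vm() when e.g. ptrace plants a breakpoint in
 * another process, copies the new instructions into the page and then
 * calls flush_icache_user_range() so the i-cache cannot serve stale
 * code.
 */
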
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
				/* On 8xx, cache control instructions (particularly
				 * "dcbst" from flush_dcache_icache) fault as write
				 * operation if there is an unpopulated TLB entry
				 * for the address in question. To workaround that,
				 * we invalidate the TLB here, thus avoiding dcbst
				 * misbehaviour.
				 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction storage fault */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* nor a data storage fault */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}