/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/initrd.h>

#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <asm/setup.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
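
/*
 * The factor of two in TABLE_SIZE appears to account for ARM's paired
 * page tables: each L2 table is allocated with both the hardware copy
 * and a "Linux" copy carrying the status bits the hardware PTE format
 * lacks (see include/asm-arm/pgtable.h).
 */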
#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
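
/*
 * _stext, _text, _etext, _end and the __init_* markers are section
 * boundary symbols provided by the linker script; swapper_pg_dir is
 * the kernel's initial page directory.  phys_initrd_start/size are
 * normally filled in from the boot tags during setup.
 */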
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern char _stext, _text, _etext, _end, __init_begin, __init_end;
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * The sole use of this is to pass memory configuration
 * data from paging_init to mem_init.
 */
static struct meminfo meminfo __initdata = { 0, };

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
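
/*
 * Dump a summary of current memory usage to the console, classifying
 * every page in every node's mem_map (reached, among other places,
 * from the SysRq-M handler).
 */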
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, node;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for (node = 0; node < numnodes; node++) {
                struct page *page, *end;

                page = NODE_MEM_MAP(node);
                end = page + NODE_DATA(node)->node_spanned_pages;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

struct node_info {
        unsigned int start;
        unsigned int end;
        int bootmap_pages;
};
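
/*
 * Helpers for converting addresses to page frame numbers: the O_PFN_*
 * macros operate on physical addresses, while the V_PFN_* macros first
 * translate a kernel virtual address via __pa().  _UP rounds up to the
 * next page boundary and _DOWN truncates, so a range computed with
 * O_PFN_UP(start)/O_PFN_DOWN(end) covers only whole pages.
 */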
#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define V_PFN_DOWN(x)	O_PFN_DOWN(__pa(x))

#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
#define V_PFN_UP(x)	O_PFN_UP(__pa(x))

#define PFN_SIZE(x)	((x) >> PAGE_SHIFT)
#define PFN_RANGE(s,e)	PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
			(((unsigned long)(s)) & PAGE_MASK))

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
        unsigned int start_pfn, bank, bootmap_pfn;

        start_pfn = V_PFN_UP(&_end);
        bootmap_pfn = 0;

        for (bank = 0; bank < mi->nr_banks; bank ++) {
                unsigned int start, end;

                if (mi->bank[bank].node != node)
                        continue;

                start = O_PFN_UP(mi->bank[bank].start);
                end = O_PFN_DOWN(mi->bank[bank].size +
                                 mi->bank[bank].start);

                if (end < start_pfn)
                        continue;

                if (start < start_pfn)
                        start = start_pfn;

                if (end <= start)
                        continue;

                if (end - start >= bootmap_pages) {
                        bootmap_pfn = start;
                        break;
                }
        }

        if (bootmap_pfn == 0)
                BUG();

        return bootmap_pfn;
}

/*
 * Scan the memory info structure and pull out:
 *  - the end of memory
 *  - the number of nodes
 *  - the pfn range of each node
 *  - the number of bootmem bitmap pages
 */
static unsigned int __init
find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
{
        unsigned int i, bootmem_pages = 0, memend_pfn = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                np[i].start = -1U;
                np[i].end = 0;
                np[i].bootmap_pages = 0;
        }

        for (i = 0; i < mi->nr_banks; i++) {
                unsigned long start, end;
                int node;

                if (mi->bank[i].size == 0) {
                        /*
                         * Mark this bank with an invalid node number
                         */
                        mi->bank[i].node = -1;
                        continue;
                }

                node = mi->bank[i].node;

                if (node >= numnodes) {
                        numnodes = node + 1;

                        /*
                         * Make sure we haven't exceeded the maximum number
                         * of nodes that we have in this configuration.  If
                         * we have, we're in trouble.  (maybe we ought to
                         * limit, instead of bugging?)
                         */
                        if (numnodes > MAX_NUMNODES)
                                BUG();
                }

                /*
                 * Get the start and end pfns for this bank
                 */
                start = O_PFN_UP(mi->bank[i].start);
                end = O_PFN_DOWN(mi->bank[i].start + mi->bank[i].size);

                if (np[node].start > start)
                        np[node].start = start;

                if (np[node].end < end)
                        np[node].end = end;

                if (memend_pfn < end)
                        memend_pfn = end;
        }

        /*
         * Calculate the number of pages we require to
         * store the bootmem bitmaps.
         */
        for (i = 0; i < numnodes; i++) {
                if (np[i].end == 0)
                        continue;

                np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
                                                            np[i].start);
                bootmem_pages += np[i].bootmap_pages;
        }

        high_memory = __va(memend_pfn << PAGE_SHIFT);

        /*
         * This doesn't seem to be used by the Linux memory
         * manager any more.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         */
        max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
        max_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);

        return bootmem_pages;
}
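
/*
 * Check that the initrd, if any, lies wholly within one of the memory
 * banks.  Returns the node containing the initrd, -1 if an initrd was
 * supplied but falls outside every bank (in which case it is disabled),
 * or -2 if there is no initrd to worry about.
 */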
static int __init check_initrd(struct meminfo *mi)
{
        int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long end = phys_initrd_start + phys_initrd_size;

        /*
         * Make sure that the initrd is within a valid area of
         * memory.
         */
        if (phys_initrd_size) {
                unsigned int i;

                initrd_node = -1;

                for (i = 0; i < mi->nr_banks; i++) {
                        unsigned long bank_end;

                        bank_end = mi->bank[i].start + mi->bank[i].size;

                        if (mi->bank[i].start <= phys_initrd_start &&
                            end <= bank_end)
                                initrd_node = mi->bank[i].node;
                }
        }

        if (initrd_node == -1) {
                printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
                       "physical memory - disabling initrd\n",
                       phys_initrd_start, end);
                phys_initrd_start = phys_initrd_size = 0;
        }
#endif

        return initrd_node;
}

/*
 * Reserve the various regions of node 0
 */
static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
{
        pg_data_t *pgdat = NODE_DATA(0);
        unsigned long res_size = 0;

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);

        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                             PTRS_PER_PGD * sizeof(pgd_t));

        /*
         * And don't forget to reserve the allocator bitmap,
         * which will be freed later.
         */
        reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
                             bootmap_pages << PAGE_SHIFT);

        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
         * implementation of GFP_DMA which does not assume that DMA-able
         * memory starts at zero.
         */
        if (machine_is_integrator() || machine_is_cintegrator())
                res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

        /*
         * These should likewise go elsewhere.  They pre-reserve the
         * screen memory region at the start of main system memory.
         */
        if (machine_is_edb7211())
                res_size = 0x00020000;
        if (machine_is_p720t())
                res_size = 0x00014000;

#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve our
         * precious DMA-able memory...
         */
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
        if (res_size)
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

/*
 * Register all available RAM in this node with the bootmem allocator.
 */
static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
{
        pg_data_t *pgdat = NODE_DATA(node);
        int bank;

        for (bank = 0; bank < mi->nr_banks; bank++)
                if (mi->bank[bank].node == node)
                        free_bootmem_node(pgdat, mi->bank[bank].start,
                                          mi->bank[bank].size);
}
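
/*
 * bootmem_init() below drives this setup in three steps: size each
 * node from the meminfo (find_memend_and_nodes), find a free run of
 * pages for the bootmem bitmaps (find_bootmap_pfn), then hand each
 * node's RAM to the bootmem allocator while reserving the kernel
 * image, page tables, bitmaps and initrd so they cannot be handed
 * out again.
 */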

/*
 * Initialise the bootmem allocator for all nodes.  This is called
 * early during the architecture specific initialisation.
 */
static void __init bootmem_init(struct meminfo *mi)
{
        struct node_info node_info[MAX_NUMNODES], *np = node_info;
        unsigned int bootmap_pages, bootmap_pfn, map_pg;
        int node, initrd_node;

        bootmap_pages = find_memend_and_nodes(mi, np);
        bootmap_pfn = find_bootmap_pfn(0, mi, bootmap_pages);
        initrd_node = check_initrd(mi);

        map_pg = bootmap_pfn;

        /*
         * Initialise the bootmem nodes.
         *
         * What we really want to do is:
         *
         *   unmap_all_regions_except_kernel();
         *   for_each_node_in_reverse_order(node) {
         *     map_node(node);
         *     allocate_bootmem_map(node);
         *     init_bootmem_node(node);
         *     free_bootmem_node(node);
         *   }
         *
         * but this is a 2.5-type change.  For now, we just set
         * the nodes up in reverse order.
         *
         * (we could also do with rolling bootmem_init and paging_init
         * into one generic "memory_init" type function).
         */
        np += numnodes - 1;
        for (node = numnodes - 1; node >= 0; node--, np--) {
                /*
                 * If there are no pages in this node, ignore it.
                 * Note that node 0 must always have some pages.
                 */
                if (np->end == 0) {
                        if (node == 0)
                                BUG();
                        continue;
                }

                /*
                 * Initialise the bootmem allocator.
                 */
                init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
                free_bootmem_node_bank(node, mi);
                map_pg += np->bootmap_pages;

                /*
                 * If this is node 0, we need to reserve some areas ASAP -
                 * we may use bootmem on node 0 to setup the other nodes.
                 */
                if (node == 0)
                        reserve_node_zero(bootmap_pfn, bootmap_pages);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size && initrd_node >= 0) {
                reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
                                     phys_initrd_size);
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        void *zero_page;
        int node;

        bootmem_init(mi);

        memcpy(&meminfo, mi, sizeof(meminfo));

        /*
         * allocate the zero page.  Note that we count on this going ok.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);

        /*
         * initialise the page tables.
         */
        memtable_init(mi);
        if (mdesc->map_io)
                mdesc->map_io();
        flush_tlb_all();

        /*
         * initialise the zones within each node
         */
        for (node = 0; node < numnodes; node++) {
                unsigned long zone_size[MAX_NR_ZONES];
                unsigned long zhole_size[MAX_NR_ZONES];
                struct bootmem_data *bdata;
                pg_data_t *pgdat;
                int i;

                /*
                 * Initialise the zone size information.
                 */
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        zone_size[i] = 0;
                        zhole_size[i] = 0;
                }

                pgdat = NODE_DATA(node);
                bdata = pgdat->bdata;

                /*
                 * The size of this node has already been determined.
                 * If we need to do anything fancy with the allocation
                 * of this memory to the zones, now is the time to do
                 * it.
                 */
                zone_size[0] = bdata->node_low_pfn -
                               (bdata->node_boot_start >> PAGE_SHIFT);

                /*
                 * If this zone has zero size, skip it.
                 */
                if (!zone_size[0])
                        continue;

                /*
                 * For each bank in this node, calculate the size of the
                 * holes.  holes = node_size - sum(bank_sizes_in_node)
                 */
                zhole_size[0] = zone_size[0];
                for (i = 0; i < mi->nr_banks; i++) {
                        if (mi->bank[i].node != node)
                                continue;

                        zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
                }

                /*
                 * Adjust the sizes according to any special
                 * requirements for this machine type.
                 */
                arch_adjust_zones(node, zone_size, zhole_size);

                free_area_init_node(node, pgdat, zone_size,
                                bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
        }

#ifndef CONFIG_DISCONTIGMEM
        mem_map = contig_page_data.node_mem_map;
#endif

        /*
         * finish off the bad pages once
         * the mem_map is initialised
         */
        memzero(zero_page, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}
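
/*
 * Give a range of reserved pages back to the page allocator: clear
 * PG_reserved, reset the reference count, and free each page.  The
 * caller passes a label ("init", "initrd") for the log message, or
 * NULL to free silently.
 */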
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
        unsigned int size = (end - addr) >> 10;

        for (; addr < end; addr += PAGE_SIZE) {
                struct page *page = virt_to_page(addr);
                ClearPageReserved(page);
                set_page_count(page, 1);
                free_page(addr);
                totalram_pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned int codepages, datapages, initpages;
        int i, node;

        codepages = &_etext - &_text;
        datapages = &_end - &_etext;
        initpages = &__init_end - &__init_begin;

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = virt_to_page(high_memory) - mem_map;
#endif

        /*
         * We may have non-contiguous memory.
         */
        if (meminfo.nr_banks != 1)
                create_memmap_holes(&meminfo);

        /* this will put all unused low memory onto the freelists */
        for (node = 0; node < numnodes; node++) {
                pg_data_t *pgdat = NODE_DATA(node);

                if (pgdat->node_spanned_pages != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");

        num_physpages = 0;
        for (i = 0; i < meminfo.nr_banks; i++) {
                num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
                printk(" %ldMB", meminfo.bank[i].size >> 20);
        }

        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
        printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
               "%dK data, %dK init)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               codepages >> 10, datapages >> 10, initpages >> 10);

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}
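
/*
 * Free the pages occupied by the .init sections once boot is
 * complete.  Note that on the Integrator platforms the init memory
 * is deliberately left in place.
 */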
void free_initmem(void)
{
        if (!machine_is_integrator() && !machine_is_cintegrator()) {
                free_area((unsigned long)(&__init_begin),
                          (unsigned long)(&__init_end),
                          "init");
        }
}
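
/*
 * Normally the initrd pages are returned to the allocator once their
 * contents have been consumed; booting with "keepinitrd" on the
 * command line sets keep_initrd and leaves them in place.
 */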
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);

#endif