/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[NR_NODES] __initdata;
/*
 * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node
 *
 * This function will move nodes with only CPUs (no memory)
 * to a node with memory which is at the minimum numa_slit distance.
 * Any reassignments will result in the compression of the nodes
 * and renumbering the nid values where appropriate.
 * The static declarations below are to avoid large stack size which
 * makes the code not re-entrant.
 */
static void __init reassign_cpu_only_nodes(void)
{
	struct node_memblk_s *p;
	int i, j, k, nnode, nid, cpu, cpunid, pxm;
	u8 cslit, slit;
	static DECLARE_BITMAP(nodes_with_mem, NR_NODES) __initdata;
	static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
	static int node_flip[NR_NODES] __initdata;
	static int old_nid_map[NR_CPUS] __initdata;

	for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
		if (!test_bit(p->nid, (void *) nodes_with_mem)) {
			set_bit(p->nid, (void *) nodes_with_mem);
			nnode++;
		}

	/*
	 * All nids with memory.
	 */
	if (nnode == numnodes)
		return;

	/*
	 * Change nids and attempt to migrate CPU-only nodes
	 * to the best numa_slit (closest neighbor) possible.
	 * For reassigned CPU nodes a nid can't be arrived at
	 * until after this loop because the target nid's new
	 * identity might not have been established yet. So
	 * new nid values are fabricated above numnodes and
	 * mapped back later to their true value.
	 */
	for (nid = 0, i = 0; i < numnodes; i++) {
		if (test_bit(i, (void *) nodes_with_mem)) {
			/*
			 * Save original nid value for numa_slit
			 * fixup and node_cpuid reassignments.
			 */
			node_flip[nid] = i;

			if (i == nid) {
				nid++;
				continue;
			}

			for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
				if (p->nid == i)
					p->nid = nid;

			cpunid = nid;
			nid++;
		} else
			cpunid = numnodes;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (node_cpuid[cpu].nid == i) {
				/*
				 * For nodes not being reassigned just
				 * fix the cpu's nid and reverse pxm map.
				 */
				if (cpunid < numnodes) {
					pxm = nid_to_pxm_map[i];
					pxm_to_nid_map[pxm] =
						node_cpuid[cpu].nid = cpunid;
					continue;
				}

				/*
				 * For nodes being reassigned, find best node by
				 * numa_slit information and then make a temporary
				 * nid value based on current nid and numnodes.
				 */
				for (slit = 0xff, k = numnodes + numnodes, j = 0; j < numnodes; j++)
					if (i == j)
						continue;
					else if (test_bit(j, (void *) nodes_with_mem)) {
						cslit = numa_slit[i * numnodes + j];
						if (cslit < slit) {
							k = numnodes + j;
							slit = cslit;
						}
					}

				/* save old nid map so we can update the pxm */
				old_nid_map[cpu] = node_cpuid[cpu].nid;
				node_cpuid[cpu].nid = k;
			}
	}

	/*
	 * Fixup temporary nid values for CPU-only nodes.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node_cpuid[cpu].nid == (numnodes + numnodes)) {
			pxm = nid_to_pxm_map[old_nid_map[cpu]];
			pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1;
		} else
			for (i = 0; i < nnode; i++) {
				if (node_flip[i] != (node_cpuid[cpu].nid - numnodes))
					continue;

				pxm = nid_to_pxm_map[old_nid_map[cpu]];
				pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i;
				break;
			}

	/*
	 * Fix numa_slit by compressing from larger
	 * nid array to reduced nid array.
	 */
	for (i = 0; i < nnode; i++)
		for (j = 0; j < nnode; j++)
			numa_slit_fix[i * nnode + j] =
				numa_slit[node_flip[i] * numnodes + node_flip[j]];

	memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit));

	for (i = nnode; i < numnodes; i++)
		node_set_offline(i);

	numnodes = nnode;

	return;
}
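/*
 * Illustrative walk-through (hypothetical 3-node box): if nodes 0 and 2
 * have memory but node 1 is CPU-only, node 2 is renumbered to nid 1
 * (node_flip[1] == 2), node 1's cpus are tagged with the temporary nid
 * numnodes + <best neighbor>, and the final fixup pass maps them onto
 * the surviving node with the smallest numa_slit distance.  numnodes
 * then drops from 3 to 2.
 */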
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
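/*
 * Worked example (illustrative only, assuming the usual 64KB
 * PERCPU_PAGE_SIZE): NODEDATA_ALIGN(0x4080000, 2) first rounds the
 * address up to the next 1MB boundary, 0x4100000, then adds
 * 2 * 0x10000, giving 0x4120000.  Distinct nodes therefore never place
 * their per-node area at the same offset within a 1MB stride.
 */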
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap. We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
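/*
 * Rounding example (illustrative, assuming the default 16MB
 * IA64_GRANULE_SIZE and 16KB pages): a range of [0x4080000, 0x8000000)
 * yields cstart = GRANULEROUNDDOWN(0x4080000) = 0x4000000 and
 * epfn = GRANULEROUNDUP(0x8000000) >> 14 = 0x2000, so the node's
 * bootmem block grows to cover whole granules even though the EFI
 * range itself is not granule aligned.
 */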
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.
 */
static int early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct. Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |        NR_CPUS         |
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized. We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn, cpu, cpus;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	void *cpu_data;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize)) {
		mem_data[node].pernode_addr = pernode;
		mem_data[node].pernode_size = pernodesize;
		memset(__va(pernode), 0, pernodesize);

		cpu_data = (void *)pernode;
		pernode += PERCPU_PAGE_SIZE * cpus;

		mem_data[node].pgdat = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		mem_data[node].node_data = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

		mem_data[node].pgdat->bdata = bdp;
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		/*
		 * Copy the static per-cpu data into the region we
		 * just set aside and then setup __per_cpu_offset
		 * for each CPU on this node.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (node == node_cpuid[cpu].nid) {
				memcpy(__va(cpu_data), __phys_per_cpu_start,
				       __per_cpu_end - __per_cpu_start);
				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
					__per_cpu_start;
				cpu_data += PERCPU_PAGE_SIZE;
			}
		}
	}

	return 0;
}
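/*
 * Size example (illustrative, assuming 64KB PERCPU_PAGE_SIZE, 16KB
 * pages, and two cpus on the node): pernodesize is 2 * 64KB of per-cpu
 * space plus the L1-cache-aligned pg_data_t and ia64_node_data, which
 * together fit within a single 16KB page, so PAGE_ALIGN() yields 144KB
 * for the whole per-node area.
 */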
/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct. After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for (node = 0; node < numnodes; node++) {
		pg_data_t *pdp = mem_data[node].pgdat;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure. The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;
	pg_data_t *pgdat_list[NR_NODES];

	for (node = 0; node < numnodes; node++)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for (node = 0; node < numnodes; node++) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}

	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
}
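/*
 * Note: the per-node pg_data_ptrs copy is what makes node-local lookups
 * cheap; on ia64, NODE_DATA(nid) typically resolves through the calling
 * cpu's local ia64_node_data rather than through a global array (see
 * <asm/nodedata.h>), so the memcpy above keeps every node's view
 * consistent.
 */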
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_node_memblks == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	min_low_pfn = -1;
	max_low_pfn = 0;

	if (numnodes > 1)
		reassign_cpu_only_nodes();

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = numnodes - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		/* Sanity check... */
		if (!pernode)
			panic("pernode space for node %d "
			      "could not be allocated!", node);

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}
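/*
 * Note on ordering: build_node_maps has to run before find_pernode_space
 * (which needs each node's pfn limits to size the bootmem map), and the
 * bootmem maps must be initialized before free_node_bootmem returns
 * ranges to them.  find_memory therefore walks the EFI memory map three
 * times before the boot allocator is ready.
 */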
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() == 0) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			per_cpu(local_per_cpu_offset, cpu) =
				__per_cpu_offset[cpu];
		}
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
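/*
 * For reference (a sketch of the consumer side, not code from this
 * file): per_cpu(var, cpu) is roughly an offset lookup,
 *
 *	*(&per_cpu__var + __per_cpu_offset[cpu])
 *
 * so pointing __per_cpu_offset[cpu] at the node-local copy made in
 * find_pernode_space() is all that node-local per-cpu references need.
 */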
/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present = pgdat->node_present_pages;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
				continue;
			if (PageReserved(pgdat->node_mem_map+i))
				reserved++;
			else if (PageSwapCache(pgdat->node_mem_map+i))
				cached++;
			else if (page_count(pgdat->node_mem_map+i))
				shared += page_count(pgdat->node_mem_map+i)-1;
		}
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

EXPORT_SYMBOL_GPL(show_mem);
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs. Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
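/*
 * Split example (illustrative addresses): if node 0 owns physical
 * [0, 1GB) and node 1 owns [1GB, 2GB), an 8MB EFI range starting 4MB
 * below the 1GB boundary results in two callbacks:
 *
 *	(*func)(0x3fc00000, 0x400000, 0);
 *	(*func)(0x40000000, 0x400000, 1);
 */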
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number. This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
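/*
 * DMA accounting example (illustrative, taking __pa(MAX_DMA_ADDRESS)
 * at the ia64-typical 4GB mark): a range of [3GB, 5GB) adds
 * 2GB >> PAGE_SHIFT to num_physpages but only 1GB >> PAGE_SHIFT to
 * num_dma_physpages, since just the [3GB, 4GB) half is reachable by
 * 32-bit DMA devices.
 */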
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	/* so min() will work in count_node_pages */
	for (node = 0; node < numnodes; node++)
		mem_data[node].min_pfn = ~0UL;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	for (node = 0; node < numnodes; node++) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}
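		/*
		 * Zone arithmetic example (illustrative pfn values): with
		 * min_pfn = 0x30000, max_pfn = 0x50000, max_dma = 0x40000,
		 * num_physpages = 0x18000 and num_dma_physpages = 0xc000,
		 * this node gets zones_size[ZONE_DMA] = 0x10000 with
		 * zholes_size[ZONE_DMA] = 0x4000, and zones_size[ZONE_NORMAL]
		 * = 0x10000 with zholes_size[ZONE_NORMAL] = 0x4000.
		 */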
		if (node == 0) {
			vmalloc_end -=
				PAGE_ALIGN(max_low_pfn * sizeof(struct page));
			vmem_map = (struct page *) vmalloc_end;

			efi_memmap_walk(create_mem_map_page_table, NULL);
			printk("Virtual mem_map starts at 0x%p\n", vmem_map);
		}

		pfn_offset = mem_data[node].min_pfn;

		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}