/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[NR_NODES] __initdata;
/**
 * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node
 *
 * This function will move nodes with only CPUs (no memory)
 * to a node with memory which is at the minimum numa_slit distance.
 * Any reassignments will result in the compression of the nodes
 * and renumbering the nid values where appropriate.
 * The static declarations below are to avoid large stack size which
 * makes the code not re-entrant.
 */
static void __init reassign_cpu_only_nodes(void)
{
	struct node_memblk_s *p;
	int i, j, k, nnode, nid, cpu, cpunid, pxm;
	u8 cslit, slit;
	static DECLARE_BITMAP(nodes_with_mem, NR_NODES) __initdata;
	static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
	static int node_flip[NR_NODES] __initdata;
	static int old_nid_map[NR_CPUS] __initdata;

	for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
		if (!test_bit(p->nid, (void *) nodes_with_mem)) {
			set_bit(p->nid, (void *) nodes_with_mem);
			nnode++;
		}

	/*
	 * All nids with memory.
	 */
	if (nnode == numnodes)
		return;

	/*
	 * Change nids and attempt to migrate CPU-only nodes
	 * to the best numa_slit (closest neighbor) possible.
	 * For reassigned CPU nodes a nid can't be arrived at
	 * until after this loop because the target nid's new
	 * identity might not have been established yet. So
	 * new nid values are fabricated above numnodes and
	 * mapped back later to their true value.
	 */
	for (nid = 0, i = 0; i < numnodes; i++) {
		if (test_bit(i, (void *) nodes_with_mem)) {
			/*
			 * Save original nid value for numa_slit
			 * fixup and node_cpuid reassignments.
			 */
			node_flip[nid] = i;

			if (i == nid) {
				nid++;
				continue;
			}

			for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
				if (p->nid == i)
					p->nid = nid;

			cpunid = nid;
			nid++;
		} else
			cpunid = numnodes;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (node_cpuid[cpu].nid == i) {
				/*
				 * For nodes not being reassigned just
				 * fix the cpu's nid and reverse pxm map.
				 */
				if (cpunid < numnodes) {
					pxm = nid_to_pxm_map[i];
					pxm_to_nid_map[pxm] =
						node_cpuid[cpu].nid = cpunid;
					continue;
				}

				/*
				 * For nodes being reassigned, find best node by
				 * numa_slit information and then make a temporary
				 * nid value based on current nid and numnodes.
				 */
				for (slit = 0xff, k = numnodes + numnodes, j = 0; j < numnodes; j++)
					if (i == j)
						continue;
					else if (test_bit(j, (void *) nodes_with_mem)) {
						cslit = numa_slit[i * numnodes + j];
						if (cslit < slit) {
							k = numnodes + j;
							slit = cslit;
						}
					}

				/* save old nid map so we can update the pxm */
				old_nid_map[cpu] = node_cpuid[cpu].nid;
				node_cpuid[cpu].nid = k;
			}
	}

	/*
	 * Fixup temporary nid values for CPU-only nodes.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node_cpuid[cpu].nid == (numnodes + numnodes)) {
			pxm = nid_to_pxm_map[old_nid_map[cpu]];
			pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = nnode - 1;
		} else
			for (i = 0; i < nnode; i++) {
				if (node_flip[i] != (node_cpuid[cpu].nid - numnodes))
					continue;

				pxm = nid_to_pxm_map[old_nid_map[cpu]];
				pxm_to_nid_map[pxm] = node_cpuid[cpu].nid = i;
				break;
			}

	/*
	 * Fix numa_slit by compressing from larger
	 * nid array to reduced nid array.
	 */
	for (i = 0; i < nnode; i++)
		for (j = 0; j < nnode; j++)
			numa_slit_fix[i * nnode + j] =
				numa_slit[node_flip[i] * numnodes + node_flip[j]];

	memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit));

	/* Mark the now-unused trailing nids offline */
	for (i = nnode; i < numnodes; i++)
		node_clear(i, node_online_map);

	numnodes = nnode;

	return;
}
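
/*
 * Illustrative walk-through (not from the original source): with numnodes = 3
 * where node 1 is CPU-only, the first loop finds nnode = 2 and node_flip
 * becomes {0, 2}; node 2's memblks and CPUs are renumbered to nid 1.  A CPU
 * on node 1 is first given the temporary nid numnodes + j for the memory
 * node j with the smallest numa_slit distance, and the fixup loop then maps
 * that back through node_flip[] to the final compressed nid.
 */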
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
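
/*
 * Worked example (illustrative; assumes the usual 64KB ia64 PERCPU_PAGE_SIZE):
 * NODEDATA_ALIGN(0x3000100, 2) rounds 0x3000100 up to the next 1MB boundary,
 * 0x3100000, then adds 2 * 0x10000, giving 0x3120000.  Consecutive nodes are
 * thus offset from the 1MB boundary by PERCPU_PAGE_SIZE each, so their
 * per-node areas do not all collide on the same cache index.
 */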
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap. We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
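
/*
 * Illustrative numbers (assuming the default 16MB IA64_GRANULE_SIZE and 16KB
 * pages): for a range [0x4100000, 0x6100000), cstart = GRANULEROUNDDOWN =
 * 0x4000000 and epfn = GRANULEROUNDUP(0x6100000) >> PAGE_SHIFT =
 * 0x7000000 >> 14 = 0x1c00, so the node's block is extended to whole
 * granules even though the EFI range itself was not granule aligned.
 */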
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.
 */
static int early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct. Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   |
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized. We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn, cpu, cpus;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	void *cpu_data;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize)) {
		mem_data[node].pernode_addr = pernode;
		mem_data[node].pernode_size = pernodesize;
		memset(__va(pernode), 0, pernodesize);

		cpu_data = (void *)pernode;
		pernode += PERCPU_PAGE_SIZE * cpus;

		mem_data[node].pgdat = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		mem_data[node].node_data = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

		mem_data[node].pgdat->bdata = bdp;
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		/*
		 * Copy the static per-cpu data into the region we
		 * just set aside and then setup __per_cpu_offset
		 * for each CPU on this node.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (node == node_cpuid[cpu].nid) {
				memcpy(__va(cpu_data), __phys_per_cpu_start,
				       __per_cpu_end - __per_cpu_start);
				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
					__per_cpu_start;
				cpu_data += PERCPU_PAGE_SIZE;
			}
		}
	}

	return 0;
}
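
/*
 * Sketch of the carve-out arithmetic above for a node with 4 CPUs
 * (illustrative; the real sizes depend on the kernel configuration):
 *
 *	pernodesize = PAGE_ALIGN(4 * PERCPU_PAGE_SIZE
 *				 + L1_CACHE_ALIGN(sizeof(pg_data_t))
 *				 + L1_CACHE_ALIGN(sizeof(struct ia64_node_data)));
 *
 * A range only qualifies if it still has room for the bootmem bitmap
 * (mapsize) beyond pernode + pernodesize, so small leading memblks on a
 * node are skipped and a later, larger range is used instead.
 */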
/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct. After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * hand those pages out as free memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for (node = 0; node < numnodes; node++) {
		pg_data_t *pdp = mem_data[node].pgdat;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure. The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;
	pg_data_t *pgdat_list[NR_NODES];

	for (node = 0; node < numnodes; node++)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for (node = 0; node < numnodes; node++) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}

	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
}
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (numnodes == 0) {
		printk(KERN_ERR "node info missing!\n");
		numnodes = 1;
	}

	min_low_pfn = -1;
	max_low_pfn = 0;

	if (numnodes > 1)
		reassign_cpu_only_nodes();

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = numnodes - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		/* Sanity check... */
		if (!pernode)
			panic("pernode space for node %d "
			      "could not be allocated!", node);

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() == 0) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			per_cpu(local_per_cpu_offset, cpu) =
				__per_cpu_offset[cpu];
		}
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present = pgdat->node_present_pages;
		int shared = 0, cached = 0, reserved = 0;
		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
				continue;
			if (PageReserved(pgdat->node_mem_map+i))
				reserved++;
			else if (PageSwapCache(pgdat->node_mem_map+i))
				cached++;
			else if (page_count(pgdat->node_mem_map+i))
				shared += page_count(pgdat->node_mem_map+i)-1;
		}
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs. Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
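
/*
 * A minimal sketch of an additional per-node callback (the function name is
 * hypothetical, not part of this file).  It matches the signature invoked
 * through call_pernode_memory()'s func pointer and could be driven the same
 * way as the real callbacks above, e.g.:
 *
 *	efi_memmap_walk(filter_rsvd_memory, dump_node_ranges);
 */
static int __init dump_node_ranges(unsigned long start, unsigned long len,
				   int node)
{
	printk("node %d: [0x%lx-0x%lx)\n", node, start, start + len);
	return 0;
}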
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number. This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
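
/*
 * Example of the DMA accounting above (illustrative; assumes 16KB pages and
 * __pa(MAX_DMA_ADDRESS) == 4GB): a 4GB range starting at 0x80000000 spans
 * the DMA limit, so num_physpages grows by 0x100000000 >> 14 = 0x40000 pages
 * while num_dma_physpages grows only by the 2GB below the limit, i.e.
 * 0x80000000 >> 14 = 0x20000 pages.
 */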
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	/* so min() will work in count_node_pages */
	for (node = 0; node < numnodes; node++)
		mem_data[node].min_pfn = ~0UL;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	for (node = 0; node < numnodes; node++) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}

		if (node == 0) {
			vmalloc_end -=
				PAGE_ALIGN(max_low_pfn * sizeof(struct page));
			vmem_map = (struct page *) vmalloc_end;

			efi_memmap_walk(create_mem_map_page_table, 0);
			printk("Virtual mem_map starts at 0x%p\n", vmem_map);
		}

		pfn_offset = mem_data[node].min_pfn;

		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
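
/*
 * Worked zone split for the mixed case above (illustrative): a node with
 * min_pfn < max_dma < max_pfn gets
 *
 *	zones_size[ZONE_DMA]    = max_dma - min_pfn
 *	zones_size[ZONE_NORMAL] = max_pfn - max_dma
 *
 * and each zholes_size entry is that span minus the pages actually counted
 * by count_node_pages(), so granule-rounded but non-existent memory shows
 * up as holes rather than as allocatable pages.
 */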