/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/pgalloc.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};
static struct early_node_data mem_data[NR_NODES] __initdata;
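/*
 * Illustrative walk-through of reassign_cpu_only_nodes() below, on a
 * hypothetical topology: with numnodes == 3 where node 1 has CPUs but
 * no memory, nodes 0 and 2 survive, node 2 is renumbered to nid 1, and
 * node 1's CPUs move to whichever survivor is at the smaller numa_slit
 * distance.  numnodes ends up as 2 and numa_slit is compressed to 2x2.
 */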
/**
 * reassign_cpu_only_nodes - called from find_memory to move CPU-only nodes to a memory node
 *
 * This function will move nodes with only CPUs (no memory)
 * to a node with memory which is at the minimum numa_slit distance.
 * Any reassignments will result in the compression of the nodes
 * and renumbering the nid values where appropriate.
 * The static declarations below avoid a large stack footprint, at the
 * cost of making the code non-reentrant.
 */
static void __init reassign_cpu_only_nodes(void)
{
	struct node_memblk_s *p;
	int i, j, k, nnode, nid, cpu, cpunid;
	u8 cslit, slit;
	static DECLARE_BITMAP(nodes_with_mem, NR_NODES) __initdata;
	static u8 numa_slit_fix[MAX_NUMNODES * MAX_NUMNODES] __initdata;
	static int node_flip[NR_NODES] __initdata;

	/* Count the nodes that actually have memory. */
	for (nnode = 0, p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
		if (!test_bit(p->nid, (void *) nodes_with_mem)) {
			set_bit(p->nid, (void *) nodes_with_mem);
			nnode++;
		}
	/*
	 * All nids with memory; nothing to do.
	 */
	if (nnode == numnodes)
		return;
	/*
	 * Change nids and attempt to migrate CPU-only nodes
	 * to the best numa_slit (closest neighbor) possible.
	 * For reassigned CPU nodes a nid can't be arrived at
	 * until after this loop because the target nid's new
	 * identity might not have been established yet.  So
	 * new nid values are fabricated above numnodes and
	 * mapped back later to their true value.
	 */
	for (nid = 0, i = 0; i < numnodes; i++) {
		if (test_bit(i, (void *) nodes_with_mem)) {
			/*
			 * Save original nid value for numa_slit
			 * fixup and node_cpuid reassignments.
			 */
			node_flip[nid] = i;

			if (i == nid) {
				nid++;
				continue;
			}

			/* Renumber this node's memory blocks to the new nid. */
			for (p = &node_memblk[0]; p < &node_memblk[num_node_memblks]; p++)
				if (p->nid == i)
					p->nid = nid;

			cpunid = nid;
			nid++;
		} else
			cpunid = numnodes;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (node_cpuid[cpu].nid == i) {
				/* For nodes not being reassigned just fix the cpu's nid. */
				if (cpunid < numnodes) {
					node_cpuid[cpu].nid = cpunid;
					continue;
				}

				/*
				 * For nodes being reassigned, find best node by
				 * numa_slit information and then make a temporary
				 * nid value based on current nid and numnodes.
				 */
				for (slit = 0xff, k = numnodes + numnodes, j = 0; j < numnodes; j++)
					if (i == j)
						continue;
					else if (test_bit(j, (void *) nodes_with_mem)) {
						cslit = numa_slit[i * numnodes + j];
						if (cslit < slit) {
							k = numnodes + j;
							slit = cslit;
						}
					}

				node_cpuid[cpu].nid = k;
			}
	}
	/*
	 * Fixup temporary nid values for CPU-only nodes.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node_cpuid[cpu].nid == (numnodes + numnodes))
			node_cpuid[cpu].nid = nnode - 1;
		else
			for (i = 0; i < nnode; i++)
				if (node_flip[i] == (node_cpuid[cpu].nid - numnodes)) {
					node_cpuid[cpu].nid = i;
					break;
				}
	/*
	 * Fix numa_slit by compressing from larger
	 * nid array to reduced nid array.
	 */
	for (i = 0; i < nnode; i++)
		for (j = 0; j < nnode; j++)
			numa_slit_fix[i * nnode + j] =
				numa_slit[node_flip[i] * numnodes + node_flip[j]];

	memcpy(numa_slit, numa_slit_fix, sizeof (numa_slit));

	numnodes = nnode;
}
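/*
 * Compression example for the numa_slit fixup above (illustrative):
 * if node_flip == {0, 2} after dropping CPU-only node 1, each entry of
 * the new 2x2 matrix is taken from the old 3x3 matrix at
 * (node_flip[i], node_flip[j]), e.g. new (1,0) comes from old (2,0).
 */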
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
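/*
 * Worked example (illustrative; assumes PERCPU_PAGE_SIZE == 64KB):
 * NODEDATA_ALIGN(0x4080000, 2) rounds 0x4080000 up to the next 1MB
 * boundary, 0x4100000, then adds 2 * 0x10000, giving 0x4120000.  Each
 * node's area thus starts at a different offset within its megabyte,
 * so the hot per-node structures don't all collide in the cache.
 */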
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node).  Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		/* First range seen for this node */
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
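/*
 * Rounding example for the above (illustrative; assumes the default
 * 16MB granule): a range [0x4100000, 0x8200000) yields
 * cstart == 0x4000000 and an end rounded up to 0x9000000, so the
 * node's bootmem span always covers whole granules even when the EFI
 * ranges do not.
 */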
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.
 */
static int early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   |
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn, cpu, cpus;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	void *cpu_data;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize)) {
		mem_data[node].pernode_addr = pernode;
		mem_data[node].pernode_size = pernodesize;
		memset(__va(pernode), 0, pernodesize);

		cpu_data = (void *)pernode;
		pernode += PERCPU_PAGE_SIZE * cpus;

		mem_data[node].pgdat = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		mem_data[node].node_data = __va(pernode);
		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

		mem_data[node].pgdat->bdata = bdp;
		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

		/*
		 * Copy the static per-cpu data into the region we
		 * just set aside and then setup __per_cpu_offset
		 * for each CPU on this node.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (node == node_cpuid[cpu].nid) {
				memcpy(__va(cpu_data), __phys_per_cpu_start,
				       __per_cpu_end - __per_cpu_start);
				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
					__per_cpu_start;
				cpu_data += PERCPU_PAGE_SIZE;
			}
		}
	}

	return 0;
}
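/*
 * Sizing example for the above (illustrative; assumes 4 CPUs on the
 * node, PERCPU_PAGE_SIZE == 64KB, 16KB pages, and that the two
 * cache-aligned structs together fit in one page): pernodesize comes
 * to 4 * 64KB of per-cpu space plus one 16KB page for pg_data_t and
 * ia64_node_data, i.e. 272KB carved out of the first sufficiently
 * large range on the node.
 */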
/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * end up handing that memory out for general use.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for (node = 0; node < numnodes; node++) {
		pg_data_t *pdp = mem_data[node].pgdat;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;
	pg_data_t *pgdat_list[NR_NODES];

	for (node = 0; node < numnodes; node++)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for (node = 0; node < numnodes; node++) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}

	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
}
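/*
 * After this runs, NODE_DATA() can resolve a pg_data_t through
 * node-local memory only, along the lines of:
 *
 *	pg_data_t *pgdat = local_node_data->pg_data_ptrs[nid];
 *
 * so the common lookup never has to touch another node's memory.
 */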
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_node_memblks == 0)
		printk(KERN_ERR "node info missing!\n");

	min_low_pfn = -1;
	max_low_pfn = 0;

	if (numnodes > 1)
		reassign_cpu_only_nodes();

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = numnodes - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		/* Sanity check... */
		if (!pernode)
			panic("pernode space for node %d "
			      "could not be allocated!", node);

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() == 0) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			per_cpu(local_per_cpu_offset, cpu) =
				__per_cpu_offset[cpu];
		}
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, reserved = 0;
	int shared = 0, cached = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
				continue;
			if (PageReserved(pgdat->node_mem_map+i))
				reserved++;
			else if (PageSwapCache(pgdat->node_mem_map+i))
				cached++;
			else if (page_count(pgdat->node_mem_map+i))
				shared += page_count(pgdat->node_mem_map+i)-1;
		}
		printk("\t%ld pages of RAM\n", pgdat->node_present_pages);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes.  Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0); use the
		 * rounded length, not the stale @len. */
		(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
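/*
 * Split example (hypothetical SRAT): for a walk range [0x0, 0x6000000)
 * with memblks [0x0, 0x4000000) on nid 0 and [0x4000000, 0x8000000) on
 * nid 1, func is invoked twice: (0x0, 0x4000000, 0) and then
 * (0x4000000, 0x2000000, 1).
 */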
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
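/*
 * Accounting example (illustrative): a 2GB block starting at 3GB
 * physical straddles __pa(MAX_DMA_ADDRESS) (4GB on ia64), so
 * num_physpages grows by the full 2GB worth of pages while
 * num_dma_physpages only gets the 1GB below the boundary.
 */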
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long max_gap, pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_gap = 0;
	efi_memmap_walk(find_largest_hole, &max_gap);

	/* so min() will work in count_node_pages */
	for (node = 0; node < numnodes; node++)
		mem_data[node].min_pfn = ~0UL;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	for (node = 0; node < numnodes; node++) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}

		if (node == 0) {
			vmalloc_end -=
				PAGE_ALIGN(max_low_pfn * sizeof(struct page));
			vmem_map = (struct page *) vmalloc_end;

			efi_memmap_walk(create_mem_map_page_table, 0);
			printk("Virtual mem_map starts at 0x%p\n", vmem_map);
		}

		pfn_offset = mem_data[node].min_pfn;

		free_area_init_node(node, NODE_DATA(node),
				    vmem_map + pfn_offset, zones_size,
				    pfn_offset, zholes_size);
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}