/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

#ifdef DEBUG_NUMA
#define ARRAY_INITIALISER -1
#else
#define ARRAY_INITIALISER 0
#endif

int numa_cpu_lookup_table[NR_CPUS] = { [0 ... (NR_CPUS - 1)] =
        ARRAY_INITIALISER};
char *numa_memory_lookup_table;
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES - 1)] = 0};

struct pglist_data *node_data[MAX_NUMNODES];
bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static unsigned long node0_io_hole_size;
static int min_common_depth;

/*
 * We need somewhere to store start/span for each node until we have
 * allocated the real node_data structures.
 */
static struct {
        unsigned long node_start_pfn;
        unsigned long node_spanned_pages;
} init_node_data[MAX_NUMNODES] __initdata;

EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_memory_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(nr_cpus_in_node);
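
/*
 * Record the node of a logical cpu in all three lookup structures:
 * the per-cpu node id, the per-node cpu mask and the per-node cpu count.
 */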
static inline void map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
        if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
                cpu_set(cpu, numa_cpumask_lookup_table[node]);
                nr_cpus_in_node[node]++;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                cpu_clear(cpu, numa_cpumask_lookup_table[node]);
                nr_cpus_in_node[node]--;
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */
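
/*
 * Walk the Open Firmware "cpu" nodes looking for the one that matches
 * the given logical cpu's hardware id, trying the interrupt server
 * list first and falling back to the "reg" property.
 */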
static struct device_node * __devinit find_cpu_node(unsigned int cpu)
{
        unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
        struct device_node *cpu_node = NULL;
        unsigned int *interrupt_server, *reg;
        int len;

        while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
                /* Try interrupt server first */
                interrupt_server = (unsigned int *)get_property(cpu_node,
                                        "ibm,ppc-interrupt-server#s", &len);

                len = len / sizeof(u32);

                if (interrupt_server && (len > 0)) {
                        while (len--)
                                if (interrupt_server[len] == hw_cpuid)
                                        return cpu_node;
                } else {
                        reg = (unsigned int *)get_property(cpu_node,
                                                           "reg", &len);
                        if (reg && (len > 0) && (reg[0] == hw_cpuid))
                                return cpu_node;
                }
        }

        return NULL;
}

/* must hold reference to node during call */
static unsigned int *of_get_associativity(struct device_node *dev)
{
        return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}
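
/*
 * Return the NUMA domain of a device node: the "ibm,associativity"
 * entry at depth min_common_depth.  Cell 0 of the property holds the
 * number of entries that follow, hence the tmp[0] bounds check.
 */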
static int of_node_numa_domain(struct device_node *device)
{
        int numa_domain;
        unsigned int *tmp;

        if (min_common_depth == -1)
                return 0;

        tmp = of_get_associativity(device);
        if (tmp && (tmp[0] >= min_common_depth)) {
                numa_domain = tmp[min_common_depth];
        } else {
                dbg("WARNING: no NUMA information for %s\n",
                    device->full_name);
                numa_domain = 0;
        }

        return numa_domain;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth;
        unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;

        rtas_root = of_find_node_by_path("/rtas");

        if (!rtas_root)
                return -1;

        /*
         * this property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes.  The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        ref_points = (unsigned int *)get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);

        if (ref_points && (len >= 2 * sizeof(unsigned int))) {
                depth = ref_points[1];
        } else {
                dbg("WARNING: could not find NUMA "
                    "associativity reference point\n");
                depth = -1;
        }
        of_node_put(rtas_root);

        return depth;
}
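
/*
 * Read one value from an OF reg-style buffer, consuming
 * prom_n_size_cells() 32-bit cells and advancing the buffer pointer
 * past them.
 */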
static unsigned long read_cell_ul(struct device_node *device, unsigned int **buf)
{
        int i;
        unsigned long result = 0;

        i = prom_n_size_cells(device);
        do {
                result = (result << 32) | **buf;
                (*buf)++;
        } while (--i);

        return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int numa_domain = 0;
        struct device_node *cpu = find_cpu_node(lcpu);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        numa_domain = of_node_numa_domain(cpu);

        if (numa_domain >= MAX_NUMNODES) {
                /*
                 * POWER4 LPAR uses 0xffff as invalid node,
                 * don't warn in this case.
                 */
                if (numa_domain != 0xffff)
                        printk(KERN_ERR "WARNING: cpu %lu "
                               "maps to invalid NUMA node %d\n",
                               lcpu, numa_domain);
                numa_domain = 0;
        }
out:
        node_set_online(numa_domain);

        map_cpu_to_node(lcpu, numa_domain);

        of_node_put(cpu);

        return numa_domain;
}
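
/*
 * Hotplug notifier: bind an incoming cpu to its node, and drop the
 * mapping again when the bring-up is cancelled or the cpu dies.
 */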
static int cpu_numa_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
                if (min_common_depth == -1 || !numa_enabled)
                        map_cpu_to_node(lcpu, 0);
                else
                        numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}
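
/*
 * Walk the device tree, building the memory-increment-to-node lookup
 * table and recording each node's start/span.  A non-zero return means
 * no usable NUMA information was found and the caller should fall back
 * to setup_nonnuma().
 */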
static int __init parse_numa_properties(void)
{
        struct device_node *memory = NULL;
        int max_domain = 0;
        long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        numa_memory_lookup_table =
                (char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
        memset(numa_memory_lookup_table, 0, entries * sizeof(char));

        for (i = 0; i < entries; i++)
                numa_memory_lookup_table[i] = ARRAY_INITIALISER;

        min_common_depth = find_min_common_depth();

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
        if (min_common_depth < 0)
                return min_common_depth;

        max_domain = numa_setup_cpu(boot_cpuid);

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int numa_domain;
                int ranges;
                unsigned int *memcell_buf;
                int len;

                memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                ranges = memory->n_addrs;
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_cell_ul(memory, &memcell_buf);
                size = read_cell_ul(memory, &memcell_buf);

                start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
                size = _ALIGN_UP(size, MEMORY_INCREMENT);

                numa_domain = of_node_numa_domain(memory);

                if (numa_domain >= MAX_NUMNODES) {
                        if (numa_domain != 0xffff)
                                printk(KERN_ERR "WARNING: memory at %lx maps "
                                       "to invalid NUMA node %d\n", start,
                                       numa_domain);
                        numa_domain = 0;
                }

                node_set_online(numa_domain);

                if (max_domain < numa_domain)
                        max_domain = numa_domain;

                /*
                 * For backwards compatibility, OF splits the first node
                 * into two regions (the first being 0-4GB).  Check for
                 * this simple case and complain if there is a gap in
                 * memory.
                 */
                if (init_node_data[numa_domain].node_spanned_pages) {
                        unsigned long shouldstart =
                                init_node_data[numa_domain].node_start_pfn +
                                init_node_data[numa_domain].node_spanned_pages;
                        if (shouldstart != (start / PAGE_SIZE)) {
                                printk(KERN_ERR "WARNING: Hole in node, "
                                       "disabling region start %lx "
                                       "length %lx\n", start, size);
                                continue;
                        }
                        init_node_data[numa_domain].node_spanned_pages +=
                                size / PAGE_SIZE;
                } else {
                        init_node_data[numa_domain].node_start_pfn =
                                start / PAGE_SIZE;
                        init_node_data[numa_domain].node_spanned_pages =
                                size / PAGE_SIZE;
                }

                for (i = start; i < (start + size); i += MEMORY_INCREMENT)
                        numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
                                numa_domain;

                if (--ranges)
                        goto new_range;
        }

        numnodes = max_domain + 1;

        return 0;
}
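
/*
 * Fallback for machines without usable NUMA information: put all
 * memory and the boot cpu in node 0.
 */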
static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long i;

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %luMB\n",
               (top_of_ram - total_ram) >> 20);

        if (!numa_memory_lookup_table) {
                long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
                numa_memory_lookup_table =
                        (char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
                memset(numa_memory_lookup_table, 0, entries * sizeof(char));
                for (i = 0; i < entries; i++)
                        numa_memory_lookup_table[i] = ARRAY_INITIALISER;
        }

        map_cpu_to_node(boot_cpuid, 0);

        node_set_online(0);

        init_node_data[0].node_start_pfn = 0;
        init_node_data[0].node_spanned_pages = lmb_end_of_DRAM() / PAGE_SIZE;

        for (i = 0; i < top_of_ram; i += MEMORY_INCREMENT)
                numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;

        node0_io_hole_size = top_of_ram - total_ram;
}
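
/*
 * Print the physical memory ranges of each online node, coalescing
 * runs of consecutive MEMORY_INCREMENT-sized chunks in the lookup
 * table.
 */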
static void __init dump_numa_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for (node = 0; node < MAX_NUMNODES; node++) {
                unsigned long i;

                if (!node_online(node))
                        continue;

                printk(KERN_INFO "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
                        if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required.  nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static unsigned long careful_allocation(int nid, unsigned long size,
                                        unsigned long align, unsigned long end)
{
        unsigned long ret = lmb_alloc_base(size, align, end);

        /* retry over all memory */
        if (!ret)
                ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret)
                panic("numa.c: cannot allocate %lu bytes on node %d",
                      size, nid);

        /*
         * If the memory came from a previously allocated node, we must
         * retry with the bootmem allocator.
         */
        if (pa_to_nid(ret) < nid) {
                nid = pa_to_nid(ret);
                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
                                size, align, 0);

                if (!ret)
                        panic("numa.c: cannot allocate %lu bytes on node %d",
                              size, nid);

                ret = virt_to_abs(ret);

                dbg("alloc_bootmem %lx %lx\n", ret, size);
        }

        return ret;
}
void __init do_init_bootmem(void)
{
        int nid;

        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_topology();

        /*
         * This must run before the sched domains notifier.
         */
        hotcpu_notifier(cpu_numa_callback, 1);

        for (nid = 0; nid < numnodes; nid++) {
                unsigned long start_paddr, end_paddr;
                int i;
                unsigned long bootmem_paddr;
                unsigned long bootmap_pages;

                start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
                end_paddr = start_paddr +
                        (init_node_data[nid].node_spanned_pages * PAGE_SIZE);

                /* Allocate the node structure node local if possible */
                NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_paddr);
                NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
                NODE_DATA(nid)->node_start_pfn =
                        init_node_data[nid].node_start_pfn;
                NODE_DATA(nid)->node_spanned_pages =
                        init_node_data[nid].node_spanned_pages;

                if (init_node_data[nid].node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_paddr);
                dbg("end_paddr = %lx\n", end_paddr);

                bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);

                bootmem_paddr = careful_allocation(nid,
                                bootmap_pages << PAGE_SHIFT,
                                PAGE_SIZE, end_paddr);
                memset(abs_to_virt(bootmem_paddr), 0,
                       bootmap_pages << PAGE_SHIFT);
                dbg("bootmem_paddr = %lx\n", bootmem_paddr);

                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                                  start_paddr >> PAGE_SHIFT,
                                  end_paddr >> PAGE_SHIFT);

                /* free the usable memory that overlaps this node */
                for (i = 0; i < lmb.memory.cnt; i++) {
                        unsigned long physbase, size;

                        physbase = lmb.memory.region[i].physbase;
                        size = lmb.memory.region[i].size;

                        if (physbase < end_paddr &&
                            (physbase + size) > start_paddr) {
                                /* clip the region to the node's bounds */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("free_bootmem %lx %lx\n", physbase, size);
                                free_bootmem_node(NODE_DATA(nid), physbase,
                                                  size);
                        }
                }

                /* re-reserve the regions lmb has already handed out */
                for (i = 0; i < lmb.reserved.cnt; i++) {
                        unsigned long physbase = lmb.reserved.region[i].physbase;
                        unsigned long size = lmb.reserved.region[i].size;

                        if (physbase < end_paddr &&
                            (physbase + size) > start_paddr) {
                                /* clip the region to the node's bounds */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("reserve_bootmem %lx %lx\n", physbase,
                                    size);
                                reserve_bootmem_node(NODE_DATA(nid), physbase,
                                                     size);
                        }
                }
        }
}
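
/*
 * Give each node a single ZONE_DMA zone spanning its bootmem range.
 * Node 0 additionally accounts for the I/O hole.
 */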
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long zholes_size[MAX_NR_ZONES];
        int nid;

        memset(zones_size, 0, sizeof(zones_size));
        memset(zholes_size, 0, sizeof(zholes_size));

        for (nid = 0; nid < numnodes; nid++) {
                unsigned long start_pfn;
                unsigned long end_pfn;

                start_pfn = plat_node_bdata[nid].node_boot_start >> PAGE_SHIFT;
                end_pfn = plat_node_bdata[nid].node_low_pfn;

                zones_size[ZONE_DMA] = end_pfn - start_pfn;
                zholes_size[ZONE_DMA] = 0;
                if (nid == 0)
                        zholes_size[ZONE_DMA] = node0_io_hole_size >> PAGE_SHIFT;

                dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
                    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

                free_area_init_node(nid, NODE_DATA(nid), zones_size,
                                    start_pfn, zholes_size);
        }
}
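
/*
 * Handle the "numa=" command line option: "numa=off" disables NUMA
 * entirely and "numa=debug" enables the dbg() messages above.
 */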
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);