diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index c1de0903d..fe6eb9013 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -28,24 +28,35 @@
 #include <linux/mmzone.h>
 #include <linux/highmem.h>
 #include <linux/initrd.h>
+#include <linux/nodemask.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/pfn.h>
+
 #include <asm/e820.h>
 #include <asm/setup.h>
 #include <asm/mmzone.h>
+#include <bios_ebda.h>
 
-struct pglist_data *node_data[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
 bootmem_data_t node0_bdata;
 
 /*
  * numa interface - we expect the numa architecture specific code to have
  * populated the following initialisation.
  *
- * 1) numnodes - the total number of nodes configured in the system
- * 2) physnode_map - the mapping between a pfn and owning node
- * 3) node_start_pfn - the starting page frame number for a node
+ * 1) node_online_map - the map of all nodes configured (online) in the system
+ * 2) node_start_pfn - the starting page frame number for a node
  * 3) node_end_pfn - the ending page frame number for a node
  */
+unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
+unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
+
+#ifdef CONFIG_DISCONTIGMEM
 /*
+ * 4) physnode_map - the mapping between a pfn and owning node
  * physnode_map keeps track of the physical memory layout of a generic
  * numa node on a 256Mb break (each element of the array will
  * represent 256Mb of memory and will be marked by the node id.  so,
@@ -56,14 +67,39 @@ bootmem_data_t node0_bdata;
  * physnode_map[4-7] = 1;
  * physnode_map[8- ] = -1;
  */
-u8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+EXPORT_SYMBOL(physnode_map);
 
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
+void memory_present(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long pfn;
+
+	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
+			nid, start, end);
+	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
+	printk(KERN_DEBUG "  ");
+	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
+		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
+		printk("%ld ", pfn);
+	}
+	printk("\n");
+}
+
+unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
+					      unsigned long end_pfn)
+{
+	unsigned long nr_pages = end_pfn - start_pfn;
+
+	if (!nr_pages)
+		return 0;
+
+	return (nr_pages + 1) * sizeof(struct page);
+}
+#endif
 
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
-extern void one_highpage_init(struct page *, int, int);
+extern void add_one_highpage_init(struct page *, int, int);
 
 extern struct e820map e820;
 extern unsigned long init_pg_tables_end;
@@ -80,6 +116,9 @@ unsigned long node_remap_offset[MAX_NUMNODES];
 void *node_remap_start_vaddr[MAX_NUMNODES];
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
+void *node_remap_end_vaddr[MAX_NUMNODES];
+void *node_remap_alloc_vaddr[MAX_NUMNODES];
+
 /*
  * FLAT - support for basic PC memory model with discontig enabled, essentially
  * a single node with all available processors in it with a flat
@@ -87,27 +126,17 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
  */
 int __init get_memcfg_numa_flat(void)
 {
-	int pfn;
-
 	printk("NUMA - single node, flat memory mode\n");
 
 	/* Run the memory configuration and find the top of memory. */
 	find_max_pfn();
-	node_start_pfn[0] = 0;
-	node_end_pfn[0] = max_pfn;
-
-	/* Fill in the physnode_map with our simplistic memory model,
-	 * all memory is in node 0.
-	 */
-	for (pfn = node_start_pfn[0]; pfn <= node_end_pfn[0];
-	     pfn += PAGES_PER_ELEMENT)
-	{
-		physnode_map[pfn / PAGES_PER_ELEMENT] = 0;
-	}
+	node_start_pfn[0] = 0;
+	node_end_pfn[0] = max_pfn;
+	memory_present(0, 0, max_pfn);
 
-	/* Indicate there is one node available. */
+	/* Indicate there is one node available. */
+	nodes_clear(node_online_map);
 	node_set_online(0);
-	numnodes = 1;
 	return 1;
 }
@@ -128,59 +157,51 @@ static void __init find_max_pfn_node(int nid)
 		BUG();
 }
 
+/* Find the owning node for a pfn. */
+int early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
+
+	for_each_node(nid) {
+		if (node_end_pfn[nid] == 0)
+			break;
+		if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
+			return nid;
+	}
+
+	return 0;
+}
+
 /*
- * Allocate memory for the pg_data_t via a crude pre-bootmem method
- * We ought to relocate these onto their own node later on during boot.
+ * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
+ * method.  For node zero take this from the bottom of memory, for
+ * subsequent nodes place them at node_remap_start_vaddr which contains
+ * node local data in physically node local memory.  See setup_memory()
+ * for details.
  */
 static void __init allocate_pgdat(int nid)
 {
-	if (nid)
+	if (nid && node_has_online_mem(nid))
 		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
 	else {
 		NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
 		min_low_pfn += PFN_UP(sizeof(pg_data_t));
-		memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 	}
 }
 
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long system_max_low_pfn)
+void *alloc_remap(int nid, unsigned long size)
 {
-	int i;
+	void *allocation = node_remap_alloc_vaddr[nid];
 
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long curr_pfn, last_pfn, size;
-		/*
-		 * Reserve usable low memory
-		 */
-		if (e820.map[i].type != E820_RAM)
-			continue;
-		/*
-		 * We are rounding up the start address of usable memory:
-		 */
-		curr_pfn = PFN_UP(e820.map[i].addr);
-		if (curr_pfn >= system_max_low_pfn)
-			continue;
-		/*
-		 * ... and at the end of the usable range downwards:
-		 */
-		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+	size = ALIGN(size, L1_CACHE_BYTES);
 
-		if (last_pfn > system_max_low_pfn)
-			last_pfn = system_max_low_pfn;
+	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
+		return 0;
 
-		/*
-		 * .. finally, did all the rounding and playing
-		 * around just make the area go away?
-		 */
-		if (last_pfn <= curr_pfn)
-			continue;
+	node_remap_alloc_vaddr[nid] += size;
+	memset(allocation, 0, size);
 
-		size = last_pfn - curr_pfn;
-		free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn), PFN_PHYS(size));
-	}
+	return allocation;
 }
 
 void __init remap_numa_kva(void)
@@ -189,7 +210,7 @@ void __init remap_numa_kva(void)
 	unsigned long pfn;
 	int node;
 
-	for (node = 1; node < numnodes; ++node) {
+	for_each_online_node(node) {
 		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
 			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
 			set_pmd_pfn((ulong) vaddr,
@@ -203,22 +224,53 @@ static unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
 	unsigned long size, reserve_pages = 0;
+	unsigned long pfn;
+
+	for_each_online_node(nid) {
+		/*
+		 * The acpi/srat node info can show hot-add memory zones
+		 * where memory could be added but not currently present.
+		 */
+		if (node_start_pfn[nid] > max_pfn)
+			continue;
+		if (node_end_pfn[nid] > max_pfn)
+			node_end_pfn[nid] = max_pfn;
+
+		/* ensure the remap includes space for the pgdat. */
+		size = node_remap_size[nid] + sizeof(pg_data_t);
 
-	for (nid = 1; nid < numnodes; nid++) {
-		/* calculate the size of the mem_map needed in bytes */
-		size = (node_end_pfn[nid] - node_start_pfn[nid] + 1)
-			* sizeof(struct page) + sizeof(pg_data_t);
 		/* convert size to large (pmd size) pages, rounding up */
 		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
+
+		/*
+		 * Validate the region we are allocating only contains valid
+		 * pages.
+		 */
+		for (pfn = node_end_pfn[nid] - size;
+		     pfn < node_end_pfn[nid]; pfn++)
+			if (!page_is_ram(pfn))
+				break;
+
+		if (pfn != node_end_pfn[nid])
+			size = 0;
+
 		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
 				size, nid);
 		node_remap_size[nid] = size;
-		reserve_pages += size;
 		node_remap_offset[nid] = reserve_pages;
+		reserve_pages += size;
 		printk("Shrinking node %d from %ld pages to %ld pages\n",
 			nid, node_end_pfn[nid], node_end_pfn[nid] - size);
+
+		if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
+			/*
+			 * Align node_end_pfn[] and node_remap_start_pfn[] to
+			 * pmd boundary.  remap_numa_kva will barf otherwise.
+			 */
+			printk("Shrinking node %d further by %ld pages for proper alignment\n",
+				nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
+			size += node_end_pfn[nid] & (PTRS_PER_PTE-1);
+		}
+
 		node_end_pfn[nid] -= size;
 		node_remap_start_pfn[nid] = node_end_pfn[nid];
 	}
@@ -227,20 +282,32 @@ static unsigned long calculate_numa_remap_pages(void)
 	return reserve_pages;
 }
 
+extern void setup_bootmem_allocator(void);
 unsigned long __init setup_memory(void)
 {
 	int nid;
-	unsigned long bootmap_size, system_start_pfn, system_max_low_pfn;
+	unsigned long system_start_pfn, system_max_low_pfn;
 	unsigned long reserve_pages;
 
+	/*
+	 * When mapping a NUMA machine we allocate the node_mem_map arrays
+	 * from node local memory.  They are then mapped directly into KVA
+	 * between zone normal and vmalloc space.  Calculate the size of
+	 * this space and use it to adjust the boundary between ZONE_NORMAL
+	 * and ZONE_HIGHMEM.
+	 */
+	find_max_pfn();
 	get_memcfg_numa();
+	reserve_pages = calculate_numa_remap_pages();
 
 	/* partially used pages are not usable - thus round upwards */
 	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
 
-	find_max_pfn();
-	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
+	system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages;
+	printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n",
+			reserve_pages, max_low_pfn + reserve_pages);
+	printk("max_pfn = %ld\n", max_pfn);
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;
 	if (max_pfn > system_max_low_pfn)
@@ -248,7 +315,6 @@ unsigned long __init setup_memory(void)
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 	       pages_to_mb(highend_pfn - highstart_pfn));
 #endif
-	system_max_low_pfn = max_low_pfn = max_low_pfn - reserve_pages;
 	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
 			pages_to_mb(system_max_low_pfn));
 	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
@@ -256,100 +322,39 @@ unsigned long __init setup_memory(void)
 	printk("Low memory ends at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(max_low_pfn));
 
-	for (nid = 0; nid < numnodes; nid++) {
+	for_each_online_node(nid) {
 		node_remap_start_vaddr[nid] = pfn_to_kaddr(
-			highstart_pfn - node_remap_offset[nid]);
+				highstart_pfn + node_remap_offset[nid]);
+		/* Init the node remap allocator */
+		node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
+			(node_remap_size[nid] * PAGE_SIZE);
+		node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
+			ALIGN(sizeof(pg_data_t), PAGE_SIZE);
+
 		allocate_pgdat(nid);
 		printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
 			(ulong) node_remap_start_vaddr[nid],
 			(ulong) pfn_to_kaddr(highstart_pfn
-			   - node_remap_offset[nid] + node_remap_size[nid]));
+			   + node_remap_offset[nid] + node_remap_size[nid]));
 	}
 	printk("High memory starts at vaddr %08lx\n",
 			(ulong) pfn_to_kaddr(highstart_pfn));
-	for (nid = 0; nid < numnodes; nid++)
+	vmalloc_earlyreserve = reserve_pages * PAGE_SIZE;
+	for_each_online_node(nid)
 		find_max_pfn_node(nid);
 
+	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
 	NODE_DATA(0)->bdata = &node0_bdata;
-
-	/*
-	 * Initialize the boot-time allocator (with low memory only):
-	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn, 0, system_max_low_pfn);
-
-	register_bootmem_low_pages(system_max_low_pfn);
-
-	/*
-	 * Reserve the bootmem bitmap itself as well. We do this in two
-	 * steps (first step was init_bootmem()) because this catches
-	 * the (very unlikely) case of us accidentally initializing the
-	 * bootmem allocator with an invalid RAM area.
-	 */
-	reserve_bootmem_node(NODE_DATA(0), HIGH_MEMORY, (PFN_PHYS(min_low_pfn) +
-		 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
-
-	/*
-	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-	 * enabling clean reboots, SMP operation, laptop functions.
-	 */
-	reserve_bootmem_node(NODE_DATA(0), 0, PAGE_SIZE);
-
-	/*
-	 * But first pinch a few for the stack/trampoline stuff
-	 * FIXME: Don't need the extra page at 4K, but need to fix
-	 * trampoline before removing it. (see the GDT stuff)
-	 */
-	reserve_bootmem_node(NODE_DATA(0), PAGE_SIZE, PAGE_SIZE);
-
-#ifdef CONFIG_ACPI_SLEEP
-	/*
-	 * Reserve low memory region for sleep support.
- */
-	acpi_reserve_bootmem();
-#endif
-
-	/*
-	 * Find and reserve possible boot-time SMP configuration:
-	 */
-	find_smp_config();
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (LOADER_TYPE && INITRD_START) {
-		if (INITRD_START + INITRD_SIZE <= (system_max_low_pfn << PAGE_SHIFT)) {
-			reserve_bootmem_node(NODE_DATA(0), INITRD_START, INITRD_SIZE);
-			initrd_start =
-				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
-			initrd_end = initrd_start+INITRD_SIZE;
-		}
-		else {
-			printk(KERN_ERR "initrd extends beyond end of memory "
-			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-			    INITRD_START + INITRD_SIZE,
-			    system_max_low_pfn << PAGE_SHIFT);
-			initrd_start = 0;
-		}
-	}
-#endif
-	return system_max_low_pfn;
+	setup_bootmem_allocator();
+	return max_low_pfn;
 }
 
 void __init zone_sizes_init(void)
 {
 	int nid;
 
-	/*
-	 * Insert nodes into pgdat_list backward so they appear in order.
-	 * Clobber node 0's links and NULL out pgdat_list before starting.
-	 */
-	pgdat_list = NULL;
-	for (nid = numnodes - 1; nid >= 0; nid--) {
-		if (nid)
-			memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
-		NODE_DATA(nid)->pgdat_next = pgdat_list;
-		pgdat_list = NODE_DATA(nid);
-	}
-
-	for (nid = 0; nid < numnodes; nid++) {
+	for_each_online_node(nid) {
 		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
 		unsigned long *zholes_size;
 		unsigned int max_dma;
@@ -360,42 +365,31 @@ void __init zone_sizes_init(void)
 
 		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-		if (start > low) {
+		if (node_has_online_mem(nid)) {
+			if (start > low) {
 #ifdef CONFIG_HIGHMEM
-			BUG_ON(start > high);
-			zones_size[ZONE_HIGHMEM] = high - start;
+				BUG_ON(start > high);
+				zones_size[ZONE_HIGHMEM] = high - start;
#endif
-		} else {
-			if (low < max_dma)
-				zones_size[ZONE_DMA] = low;
-			else {
-				BUG_ON(max_dma > low);
-				BUG_ON(low > high);
-				zones_size[ZONE_DMA] = max_dma;
-				zones_size[ZONE_NORMAL] = low - max_dma;
+			} else {
+				if (low < max_dma)
+					zones_size[ZONE_DMA] = low;
+				else {
+					BUG_ON(max_dma > low);
+					BUG_ON(low > high);
+					zones_size[ZONE_DMA] = max_dma;
+					zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
-				zones_size[ZONE_HIGHMEM] = high - low;
+					zones_size[ZONE_HIGHMEM] = high - low;
#endif
+				}
 			}
 		}
+
 		zholes_size = get_zholes_size(nid);
-		/*
-		 * We let the lmem_map for node 0 be allocated from the
-		 * normal bootmem allocator, but other nodes come from the
-		 * remapped KVA area - mbligh
-		 */
-		if (!nid)
-			free_area_init_node(nid, NODE_DATA(nid), 0,
-				zones_size, start, zholes_size);
-		else {
-			unsigned long lmem_map;
-			lmem_map = (unsigned long)node_remap_start_vaddr[nid];
-			lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
-			lmem_map &= PAGE_MASK;
-			free_area_init_node(nid, NODE_DATA(nid),
-				(struct page *)lmem_map, zones_size,
-				start, zholes_size);
-		}
+
+		free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
+				zholes_size);
 	}
 	return;
 }
@@ -403,32 +397,29 @@ void __init zone_sizes_init(void)
 void __init set_highmem_pages_init(int bad_ppro)
 {
 #ifdef CONFIG_HIGHMEM
-	int nid;
+	struct zone *zone;
+	struct page *page;
+
+	for_each_zone(zone) {
+		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
+
+		if (!is_highmem(zone))
+			continue;
+
+		zone_start_pfn = zone->zone_start_pfn;
+		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
-	for (nid = 0; nid < numnodes; nid++) {
-		unsigned long node_pfn, node_high_size, zone_start_pfn;
-		struct page * zone_mem_map;
-
-		node_high_size = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].spanned_pages;
-		zone_mem_map = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].zone_mem_map;
-		zone_start_pfn = NODE_DATA(nid)->node_zones[ZONE_HIGHMEM].zone_start_pfn;
-
-		printk("Initializing highpages for node %d\n", nid);
-		for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
-			one_highpage_init((struct page *)(zone_mem_map + node_pfn),
-					  zone_start_pfn + node_pfn, bad_ppro);
+		printk("Initializing %s for node %d (%08lx:%08lx)\n",
+				zone->name, zone->zone_pgdat->node_id,
+				zone_start_pfn, zone_end_pfn);
+
+		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
+			if (!pfn_valid(node_pfn))
+				continue;
+			page = pfn_to_page(node_pfn);
+			add_one_highpage_init(page, node_pfn, bad_ppro);
 		}
 	}
 	totalram_pages += totalhigh_pages;
 #endif
 }
-
-void __init set_max_mapnr_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-	highmem_start_page = NODE_DATA(0)->node_zones[ZONE_HIGHMEM].zone_mem_map;
-	num_physpages = highend_pfn;
-#else
-	num_physpages = max_low_pfn;
-#endif
-}
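
The two data structures this patch reworks, the 256Mb-granularity physnode_map and the
per-node alloc_remap() window, are self-contained enough to demonstrate outside the
kernel. The sketches below are illustrative only and are not part of the patch: the
constants (4K pages, a 16GB physical address space, a 32-byte cache line, a 4K window)
and the standalone helper names are assumptions chosen for the example, not the kernel's
actual values.

/*
 * Sketch 1: the physnode_map lookup.  Each s8 element covers one 256MB
 * stripe of physical memory, so resolving the owning node of a pfn is a
 * single divide and array index.  Constants are illustrative (4K pages,
 * 16GB of physical address space), not the kernel's.
 */
#include <stdio.h>

#define PAGE_SHIFT		12			/* 4K pages, as on i386 */
#define ELEMENT_SHIFT		(28 - PAGE_SHIFT)	/* 2^28 bytes = 256MB per element */
#define PAGES_PER_ELEMENT	(1UL << ELEMENT_SHIFT)	/* 65536 pfns per element */
#define MAX_ELEMENTS		64			/* enough for 16GB */

/* -1 marks a stripe with no owning node (GNU range initializer, as in the patch) */
static signed char physnode_map[MAX_ELEMENTS] = {
	[0 ... MAX_ELEMENTS - 1] = -1
};

/* mirrors memory_present(): claim every 256MB stripe in [start, end) for nid */
static void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT)
		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
}

/* the pfn-to-node lookup that physnode_map exists to make cheap */
static int pfn_to_nid(unsigned long pfn)
{
	return physnode_map[pfn / PAGES_PER_ELEMENT];
}

int main(void)
{
	/* hypothetical layout: node 0 owns the first gig, node 1 the second */
	memory_present(0, 0, 4 * PAGES_PER_ELEMENT);
	memory_present(1, 4 * PAGES_PER_ELEMENT, 8 * PAGES_PER_ELEMENT);

	printf("pfn 0x10000 -> node %d\n", pfn_to_nid(0x10000));	/* 0 */
	printf("pfn 0x50000 -> node %d\n", pfn_to_nid(0x50000));	/* 1 */
	printf("pfn 0x90000 -> node %d\n", pfn_to_nid(0x90000));	/* -1 */
	return 0;
}

Built with gcc, this reproduces the physnode_map[0-3] = 0 / physnode_map[4-7] = 1 /
physnode_map[8- ] = -1 layout described in the comment at the top of the file.

/*
 * Sketch 2: an alloc_remap()-style bump allocator.  Allocations are carved
 * linearly out of a fixed, pre-mapped window, rounded up to the cache line
 * size, zeroed, and never freed individually.  The window, its size and
 * L1_CACHE_BYTES are illustrative stand-ins for one node's remap KVA.
 */
#include <stdio.h>
#include <string.h>

#define L1_CACHE_BYTES	32	/* typical i386-era cache line, assumed */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static char remap_window[4096];				/* one node's window */
static char *alloc_vaddr = remap_window;		/* node_remap_alloc_vaddr analogue */
static char *end_vaddr = remap_window + sizeof(remap_window);	/* node_remap_end_vaddr analogue */

static void *alloc_remap(unsigned long size)
{
	char *allocation = alloc_vaddr;

	size = ALIGN_UP(size, L1_CACHE_BYTES);

	/* hand back NULL once the window is exhausted */
	if (!allocation || allocation + size >= end_vaddr)
		return NULL;

	alloc_vaddr += size;
	memset(allocation, 0, size);

	return allocation;
}

int main(void)
{
	void *a = alloc_remap(100);	/* rounded up to 128 bytes */
	void *b = alloc_remap(100);

	printf("delta = %ld bytes\n", (long)((char *)b - (char *)a));	/* 128 */
	return 0;
}

The kernel version works per node and the window stays permanently mapped, which is
why the pg_data_t and lmem_map handed out from it end up in memory that is physically
local to the node they describe.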