diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963..84279127f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -4,7 +4,6 @@
  *  Copyright (C)
  */
 
-#include <linux/config.h>
 #include <linux/stddef.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
@@ -14,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/pagevec.h>
+#include <linux/writeback.h>
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
@@ -21,12 +21,43 @@
 #include <linux/memory_hotplug.h>
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/cpuset.h>
 
 #include <asm/tlbflush.h>
 
-extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-			  unsigned long size);
-static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+/* register this memory range in the iomem resource tree */
+static struct resource *register_memory_resource(u64 start, u64 size)
+{
+	struct resource *res;
+
+	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	BUG_ON(!res);
+
+	res->name = "System RAM";
+	res->start = start;
+	res->end = start + size - 1;
+	res->flags = IORESOURCE_MEM;
+	if (request_resource(&iomem_resource, res) < 0) {
+		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
+			(unsigned long long)res->start,
+			(unsigned long long)res->end);
+		kfree(res);
+		res = NULL;
+	}
+	return res;
+}
+
+static void release_memory_resource(struct resource *res)
+{
+	if (!res)
+		return;
+	release_resource(res);
+	kfree(res);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -34,23 +65,36 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	int zone_type;
 
 	zone_type = zone - pgdat->node_zones;
-	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
-	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+	if (!populated_zone(zone)) {
+		int ret = 0;
+		ret = init_currently_empty_zone(zone, phys_start_pfn,
+						nr_pages, MEMMAP_HOTPLUG);
+		if (ret < 0)
+			return ret;
+	}
+	memmap_init_zone(nr_pages, nid, zone_type,
+			 phys_start_pfn, MEMMAP_HOTPLUG);
+	return 0;
 }
 
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-				  int nr_pages);
 static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 {
 	int nr_pages = PAGES_PER_SECTION;
 	int ret;
 
+	if (pfn_valid(phys_start_pfn))
+		return -EEXIST;
+
 	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 
 	if (ret < 0)
 		return ret;
 
-	__add_zone(zone, phys_start_pfn);
+	ret = __add_zone(zone, phys_start_pfn);
+
+	if (ret < 0)
+		return ret;
+
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
@@ -65,16 +109,27 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 		 unsigned long nr_pages)
 {
 	unsigned long i;
 	int err = 0;
+	int start_sec, end_sec;
+	/* when initializing the mem_map, align the hot-added range to sections */
+	start_sec = pfn_to_section_nr(phys_start_pfn);
+	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 
-	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
-		err = __add_section(zone, phys_start_pfn + i);
+	for (i = start_sec; i <= end_sec; i++) {
+		err = __add_section(zone, i << PFN_SECTION_SHIFT);
 
-		if (err)
+		/*
+		 * -EEXIST is finally dealt with by the ioresource collision
+		 * check; see add_memory() => register_memory_resource().
+		 * A warning is printed if there is a collision.
+		 */
+		if (err && (err != -EEXIST))
 			break;
+		err = 0;
 	}
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);
 
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
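The reworked __add_pages() above is the interface an architecture's arch_add_memory() hands hot-added ranges to, one section at a time. For orientation, here is a hedged sketch of such a caller, loosely modeled on the x86_64 implementation of this period; the ZONE_NORMAL choice and the init_memory_mapping() step are illustrative assumptions, not part of this patch:

        /*
         * Sketch of an arch hook feeding hot-added memory to __add_pages().
         * init_memory_mapping() stands in for whatever the architecture must
         * do to make the new range addressable (illustrative, arch-specific).
         */
        int arch_add_memory(int nid, u64 start, u64 size)
        {
                struct pglist_data *pgdat = NODE_DATA(nid);
                struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
                unsigned long start_pfn = start >> PAGE_SHIFT;
                unsigned long nr_pages = size >> PAGE_SHIFT;

                init_memory_mapping(start, start + size);

                return __add_pages(zone, start_pfn, nr_pages);
        }

Because __add_section() now returns -EEXIST for already-present sections and __add_pages() swallows that error, a caller like this can pass a range that partially overlaps already-registered memory; the real collision check happens against the iomem resource tree in add_memory() further down.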
@@ -87,8 +142,8 @@ static void grow_zone_span(struct zone *zone,
 	if (start_pfn < zone->zone_start_pfn)
 		zone->zone_start_pfn = start_pfn;
 
-	if (end_pfn > old_zone_end_pfn)
-		zone->spanned_pages = end_pfn - zone->zone_start_pfn;
+	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
+				zone->zone_start_pfn;
 
 	zone_span_writeunlock(zone);
 }
@@ -102,8 +157,8 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 	if (start_pfn < pgdat->node_start_pfn)
 		pgdat->node_start_pfn = start_pfn;
 
-	if (end_pfn > old_pgdat_end_pfn)
-		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
+	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
+					pgdat->node_start_pfn;
 }
 
 int online_pages(unsigned long pfn, unsigned long nr_pages)
@@ -111,7 +166,11 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	unsigned long i;
 	unsigned long flags;
 	unsigned long onlined_pages = 0;
+	struct resource res;
+	u64 section_end;
+	unsigned long start_pfn;
 	struct zone *zone;
+	int need_zonelists_rebuild = 0;
 
 	/*
 	 * This doesn't need a lock to do pfn_to_page().
@@ -124,15 +183,128 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pfn_to_page(pfn + i);
-		online_page(page);
-		onlined_pages++;
+	/*
+	 * If this zone is not populated, then it is not in the zonelist.
+	 * This means the page allocator ignores this zone.
+	 * So, the zonelists must be rebuilt after onlining.
+	 */
+	if (!populated_zone(zone))
+		need_zonelists_rebuild = 1;
+
+	res.start = (u64)pfn << PAGE_SHIFT;
+	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
+	res.flags = IORESOURCE_MEM; /* we only care about System RAM */
+	section_end = res.end;
+
+	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
+		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+		nr_pages = (unsigned long)
+			   ((res.end + 1 - res.start) >> PAGE_SHIFT);
+
+		if (PageReserved(pfn_to_page(start_pfn))) {
+			/* this region's pages are not online yet */
+			for (i = 0; i < nr_pages; i++) {
+				struct page *page = pfn_to_page(start_pfn + i);
+				online_page(page);
+				onlined_pages++;
+			}
+		}
+
+		res.start = res.end + 1;
+		res.end = section_end;
 	}
 	zone->present_pages += onlined_pages;
 	zone->zone_pgdat->node_present_pages += onlined_pages;
 
 	setup_per_zone_pages_min();
 
+	if (need_zonelists_rebuild)
+		build_all_zonelists();
+	vm_total_pages = nr_free_pagecache_pages();
+	writeback_set_ratelimit();
 	return 0;
 }
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
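Note that online_pages() no longer assumes the whole [pfn, pfn + nr_pages) range is memory: it walks the iomem resource tree with find_next_system_ram() and onlines only the pieces actually registered as "System RAM", skipping holes. Onlining itself is normally triggered through the memory sysfs class, by writing "online" to /sys/devices/system/memory/memoryN/state. A condensed, hedged sketch of that caller, in the spirit of memory_block_action() in drivers/base/memory.c of this era; the helper name and the trimmed error handling are illustrative:

        /*
         * Condensed sketch of the sysfs online path.  Pages added by
         * __add_pages() are left PageReserved, just as bootmem leaves them;
         * onlining clears that, one section at a time.
         */
        static int memory_block_online(struct memory_block *mem)
        {
                unsigned long start_pfn = mem->phys_index << PFN_SECTION_SHIFT;
                struct page *first_page = pfn_to_page(start_pfn);
                int i;

                /* every page must still be reserved, i.e. not yet online */
                for (i = 0; i < PAGES_PER_SECTION; i++)
                        if (!PageReserved(first_page + i))
                                return -EBUSY;

                return online_pages(start_pfn, PAGES_PER_SECTION);
        }

This is also why the loop above onlines a System RAM chunk only when its first page is still PageReserved: a section that has already been onlined has had that flag cleared.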
+
+static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+{
+	struct pglist_data *pgdat;
+	unsigned long zones_size[MAX_NR_ZONES] = {0};
+	unsigned long zholes_size[MAX_NR_ZONES] = {0};
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+
+	pgdat = arch_alloc_nodedata(nid);
+	if (!pgdat)
+		return NULL;
+
+	arch_refresh_nodedata(nid, pgdat);
+
+	/* we can use NODE_DATA(nid) from here */
+
+	/* initialize the node's zones as empty; there are no present pages yet */
+	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+
+	return pgdat;
+}
+
+static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
+{
+	arch_refresh_nodedata(nid, NULL);
+	arch_free_nodedata(pgdat);
+}
+
+int add_memory(int nid, u64 start, u64 size)
+{
+	pg_data_t *pgdat = NULL;
+	int new_pgdat = 0;
+	struct resource *res;
+	int ret;
+
+	res = register_memory_resource(start, size);
+	if (!res)
+		return -EEXIST;
+
+	if (!node_online(nid)) {
+		pgdat = hotadd_new_pgdat(nid, start);
+		if (!pgdat)
+			return -ENOMEM;
+		new_pgdat = 1;
+		ret = kswapd_run(nid);
+		if (ret)
+			goto error;
+	}
+
+	/* call the arch-specific memory hot-add hook */
+	ret = arch_add_memory(nid, start, size);
+
+	if (ret < 0)
+		goto error;
+
+	/* we online the node here; there is no rollback past this point */
+	node_set_online(nid);
+
+	cpuset_track_online_nodes();
+
+	if (new_pgdat) {
+		ret = register_one_node(nid);
+		/*
+		 * If the sysfs file for the new node cannot be created,
+		 * CPUs on that node cannot be hot-added either.  There is
+		 * no way to roll back at this point, so catch the failure
+		 * with BUG_ON(), reluctantly.
+		 */
+		BUG_ON(ret);
+	}
+
+	return ret;
+error:
+	/* roll back the pgdat allocation and the resource registration */
+	if (new_pgdat)
+		rollback_node_hotadd(nid, pgdat);
+	if (res)
+		release_memory_resource(res);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(add_memory);
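add_memory() is the entry point for hotplug drivers that learn about new memory from firmware. A minimal, hypothetical consumer, in the spirit of drivers/acpi/acpi_memhotplug.c; the function name and the error handling shown are made up for illustration:

        /*
         * Hypothetical driver-side caller.  -EEXIST from add_memory() means
         * the range is already registered as System RAM, which a probe-style
         * caller can reasonably treat as success.
         */
        static int hotplug_enable_range(int nid, u64 start, u64 size)
        {
                int ret;

                ret = add_memory(nid, start, size);
                if (ret == -EEXIST)
                        return 0;
                return ret;
        }

After a successful add_memory() the range shows up as "System RAM" in /proc/iomem and as memoryN blocks under /sys/devices/system/memory/, but the pages stay reserved and unusable until each block is onlined (echo online > .../state), which is when online_pages() above finally runs.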