X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fbootmem.c;h=d213feded10df30d04fedd2e744fc89ab3f6f934;hb=10a993f7ce5c87cfeaa33c789cb4a12879bef1bb;hp=6eb3f1e5de7270bb61c48f40b86cc9f90d05b7b0;hpb=daddc0d38b3571bed170afa273a49a0eba090c1e;p=linux-2.6.git

diff --git a/mm/bootmem.c b/mm/bootmem.c
index 6eb3f1e5d..d213feded 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -16,8 +16,10 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mmzone.h>
+#include <linux/module.h>
 #include <asm/dma.h>
 #include <asm/io.h>
+#include "internal.h"
 
 /*
  * Access to this subsystem has to be serialized externally. (this is
@@ -27,6 +29,19 @@ unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
+EXPORT_SYMBOL(max_pfn);		/* This is exported so
+				 * dma_get_required_mask(), which uses
+				 * it, can be an inline function */
+
+static LIST_HEAD(bdata_list);
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * If we have booted due to a crash, max_pfn will be a very low value. We need
+ * to know the amount of memory that the previous kernel used.
+ */
+unsigned long saved_max_pfn;
+#endif
+
 /* return the number of _pages_ that will be allocated for the boot bitmap */
 unsigned long __init bootmem_bootmap_pages (unsigned long pages)
 {
@@ -38,6 +53,27 @@ unsigned long __init bootmem_bootmap_pages (unsigned long pages)
 
 	return mapsize;
 }
+/*
+ * link bdata in order
+ */
+static void link_bootmem(bootmem_data_t *bdata)
+{
+	bootmem_data_t *ent;
+	if (list_empty(&bdata_list)) {
+		list_add(&bdata->list, &bdata_list);
+		return;
+	}
+	/* insert in order */
+	list_for_each_entry(ent, &bdata_list, list) {
+		if (bdata->node_boot_start < ent->node_boot_start) {
+			list_add_tail(&bdata->list, &ent->list);
+			return;
+		}
+	}
+	list_add_tail(&bdata->list, &bdata_list);
+	return;
+}
+
 
 /*
  * Called once to set up the allocator itself.
@@ -48,13 +84,11 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long mapsize = ((end - start)+7)/8;
 
-	pgdat->pgdat_next = pgdat_list;
-	pgdat_list = pgdat;
-
-	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
+	mapsize = ALIGN(mapsize, sizeof(long));
 	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
 	bdata->node_boot_start = (start << PAGE_SHIFT);
 	bdata->node_low_pfn = end;
+	link_bootmem(bdata);
 
 	/*
 	 * Initially all pages are reserved - setup_arch() has to
@@ -82,14 +116,11 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long add
 							PAGE_SIZE-1)/PAGE_SIZE;
 	unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;
 
-	if (!size) BUG();
+	BUG_ON(!size);
+	BUG_ON(sidx >= eidx);
+	BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
+	BUG_ON(end > bdata->node_low_pfn);
 
-	if (sidx >= eidx)
-		BUG();
-	if ((addr >> PAGE_SHIFT) >= bdata->node_low_pfn)
-		BUG();
-	if (end > bdata->node_low_pfn)
-		BUG();
 	for (i = sidx; i < eidx; i++)
 		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
 #ifdef CONFIG_DEBUG_BOOTMEM
@@ -110,9 +141,8 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
 	unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
 	unsigned long end = (addr + size)/PAGE_SIZE;
 
-	if (!size) BUG();
-	if (end > bdata->node_low_pfn)
-		BUG();
+	BUG_ON(!size);
+	BUG_ON(end > bdata->node_low_pfn);
 
 	if (addr < bdata->last_success)
 		bdata->last_success = addr;
@@ -124,7 +154,7 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
 	sidx = start - (bdata->node_boot_start/PAGE_SIZE);
 
 	for (i = sidx; i < eidx; i++) {
-		if (!test_and_clear_bit(i, bdata->node_bootmem_map))
+		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
 			BUG();
 	}
 }
@@ -140,24 +170,30 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
  *
  * alignment has to be a power of 2 value.
  *
- * NOTE: This function is _not_ reenetrant.
+ * NOTE: This function is _not_ reentrant.
  */
-static void * __init
+void * __init
 __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
-		unsigned long align, unsigned long goal)
+	unsigned long align, unsigned long goal, unsigned long limit)
 {
 	unsigned long offset, remaining_size, areasize, preferred;
-	unsigned long i, start = 0, incr, eidx;
+	unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
 	void *ret;
 
 	if(!size) {
 		printk("__alloc_bootmem_core(): zero-sized request\n");
-		dump_stack();
 		BUG();
 	}
 	BUG_ON(align & (align-1));
 
-	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+	if (limit && bdata->node_boot_start >= limit)
+		return NULL;
+
+	limit >>=PAGE_SHIFT;
+	if (limit && end_pfn > limit)
+		end_pfn = limit;
+
+	eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
 	offset = 0;
 	if (align &&
 	    (bdata->node_boot_start & (align - 1UL)) != 0)
@@ -169,15 +205,16 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
 	 * first, then we try to allocate lower pages.
 	 */
 	if (goal && (goal >= bdata->node_boot_start) &&
-	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+	    ((goal >> PAGE_SHIFT) < end_pfn)) {
 		preferred = goal - bdata->node_boot_start;
 
 		if (bdata->last_success >= preferred)
-			preferred = bdata->last_success;
+			if (!limit || (limit && limit > bdata->last_success))
+				preferred = bdata->last_success;
 	} else
 		preferred = 0;
 
-	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
+	preferred = ALIGN(preferred, align) >> PAGE_SHIFT;
 	preferred += offset;
 	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
 	incr = align >> PAGE_SHIFT ? : 1;
@@ -187,6 +224,8 @@ restart_scan:
 		unsigned long j;
 		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
 		i = ALIGN(i, incr);
+		if (i >= eidx)
+			break;
 		if (test_bit(i, bdata->node_bootmem_map))
 			continue;
 		for (j = i + 1; j < i + areasize; ++j) {
@@ -218,7 +257,7 @@ found:
 	 */
 	if (align < PAGE_SIZE &&
 	    bdata->last_offset && bdata->last_pos+1 == start) {
-		offset = (bdata->last_offset+align-1) & ~(align-1);
+		offset = ALIGN(bdata->last_offset, align);
 		BUG_ON(offset > PAGE_SIZE);
 		remaining_size = PAGE_SIZE-offset;
 		if (size < remaining_size) {
@@ -255,34 +294,50 @@ found:
 static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 {
 	struct page *page;
+	unsigned long pfn;
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long i, count, total = 0;
 	unsigned long idx;
 	unsigned long *map;
+	int gofast = 0;
 
-	if (!bdata->node_bootmem_map) BUG();
+	BUG_ON(!bdata->node_bootmem_map);
 
 	count = 0;
 	/* first extant page of the node */
-	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
+	pfn = bdata->node_boot_start >> PAGE_SHIFT;
 	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
 	map = bdata->node_bootmem_map;
+	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
+	if (bdata->node_boot_start == 0 ||
+	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
+		gofast = 1;
 	for (i = 0; i < idx; ) {
 		unsigned long v = ~map[i / BITS_PER_LONG];
-		if (v) {
+
+		if (gofast && v == ~0UL) {
+			int order;
+
+			page = pfn_to_page(pfn);
+			count += BITS_PER_LONG;
+			order = ffs(BITS_PER_LONG) - 1;
+			__free_pages_bootmem(page, order);
+			i += BITS_PER_LONG;
+			page += BITS_PER_LONG;
+		} else if (v) {
 			unsigned long m;
+
+			page = pfn_to_page(pfn);
 			for (m = 1; m && i < idx; m<<=1, page++, i++) {
 				if (v & m) {
 					count++;
-					ClearPageReserved(page);
-					set_page_count(page, 1);
-					__free_page(page);
+					__free_pages_bootmem(page, 0);
 				}
 			}
 		} else {
 			i+=BITS_PER_LONG;
-			page += BITS_PER_LONG;
 		}
+		pfn += BITS_PER_LONG;
 	}
 	total += count;
 
@@ -294,9 +349,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
 	count = 0;
 	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
 		count++;
-		ClearPageReserved(page);
-		set_page_count(page, 1);
-		__free_page(page);
+		__free_pages_bootmem(page, 0);
 	}
 	total += count;
 	bdata->node_bootmem_map = NULL;
@@ -324,42 +377,46 @@ unsigned long __init free_all_bootmem_node (pg_data_t *pgdat)
 	return(free_all_bootmem_core(pgdat));
 }
 
-#ifndef CONFIG_DISCONTIGMEM
 unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
 {
 	max_low_pfn = pages;
 	min_low_pfn = start;
-	return(init_bootmem_core(&contig_page_data, start, 0, pages));
+	return(init_bootmem_core(NODE_DATA(0), start, 0, pages));
 }
 
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 void __init reserve_bootmem (unsigned long addr, unsigned long size)
 {
-	reserve_bootmem_core(contig_page_data.bdata, addr, size);
+	reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size);
 }
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 void __init free_bootmem (unsigned long addr, unsigned long size)
 {
-	free_bootmem_core(contig_page_data.bdata, addr, size);
+	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
 }
 
 unsigned long __init free_all_bootmem (void)
 {
-	return(free_all_bootmem_core(&contig_page_data));
+	return(free_all_bootmem_core(NODE_DATA(0)));
 }
-#endif /* !CONFIG_DISCONTIGMEM */
 
-void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal)
 {
-	pg_data_t *pgdat = pgdat_list;
+	bootmem_data_t *bdata;
 	void *ptr;
 
-	for_each_pgdat(pgdat)
-		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
-						align, goal)))
+	list_for_each_entry(bdata, &bdata_list, list)
+		if ((ptr = __alloc_bootmem_core(bdata, size, align, goal, 0)))
 			return(ptr);
+	return NULL;
+}
+void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
+{
+	void *mem = __alloc_bootmem_nopanic(size,align,goal);
 
+	if (mem)
+		return mem;
 	/*
 	 * Whoops, we cannot satisfy the allocation request.
 	 */
@@ -368,19 +425,41 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
 	return NULL;
 }
 
-void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align,
+				   unsigned long goal)
 {
 	void *ptr;
 
-	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
 	if (ptr)
 		return (ptr);
 
+	return __alloc_bootmem(size, align, goal);
+}
+
+#define LOW32LIMIT 0xffffffff
+
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
+{
+	bootmem_data_t *bdata;
+	void *ptr;
+
+	list_for_each_entry(bdata, &bdata_list, list)
+		if ((ptr = __alloc_bootmem_core(bdata, size,
+						align, goal, LOW32LIMIT)))
+			return(ptr);
+
 	/*
 	 * Whoops, we cannot satisfy the allocation request.
 	 */
-	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
-	panic("Out of memory");
+	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
+	panic("Out of low memory");
 	return NULL;
 }
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+				       unsigned long align, unsigned long goal)
+{
+	return __alloc_bootmem_core(pgdat->bdata, size, align, goal, LOW32LIMIT);
+}
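
For illustration only, not part of the diff above: a minimal sketch of how the __alloc_bootmem_nopanic() entry point introduced here can be used by early-boot code that degrades gracefully instead of panicking; early_table_alloc() and table_pages are hypothetical names, and only the allocator calls shown in the patch are assumed. The __alloc_bootmem_low*() variants added above serve callers that must stay below the 32-bit LOW32LIMIT boundary.

/*
 * Illustrative sketch, not from mm/bootmem.c: exercises the calling
 * convention of the interfaces introduced in the patch above.
 */
#include <linux/init.h>
#include <linux/bootmem.h>

static unsigned long __initdata table_pages = 1024;	/* hypothetical size */

static void * __init early_table_alloc(void)
{
	void *tbl;

	/*
	 * Shrink the request until it fits; __alloc_bootmem_nopanic()
	 * returns NULL on failure rather than calling panic().
	 */
	while (table_pages) {
		tbl = __alloc_bootmem_nopanic(table_pages << PAGE_SHIFT,
					      PAGE_SIZE, 0);
		if (tbl)
			return tbl;
		table_pages >>= 1;
	}
	return NULL;
}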