/*
 * Copyright (C) 1999 Ingo Molnar
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 * Simple boot-time physical memory area allocator and
 * free memory collector. It's used to deal with reserved
 * system memory and memory holes as well.
 */
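/*
 * The allocator state is one bitmap per node: bit N tracks the N-th
 * page frame of that node, a set bit meaning reserved and a clear bit
 * meaning free.
 */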
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <asm/dma.h>
#include <asm/io.h>
/*
 * Access to this subsystem has to be serialized externally (this is
 * true for the boot process anyway).
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
EXPORT_SYMBOL(min_low_pfn);
unsigned long max_pfn;
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
EXPORT_SYMBOL(max_pfn);		/* This is exported so
				 * dma_get_required_mask(), which uses
				 * it, can be an inline function */
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages (unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages+7)/8;				/* one bit per page, rounded up to bytes */
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;	/* round up to whole pages */
	mapsize >>= PAGE_SHIFT;				/* bytes -> pages */

	return mapsize;
}
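/*
 * Worked example (hypothetical numbers): with 4 KB pages, a node of
 * 131072 pages (512 MB) needs 131072 bits = 16 KB of bitmap, so
 * bootmem_bootmap_pages(131072) returns 4.
 */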
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize = ((end - start)+7)/8;

	/* link this node into the global pgdat list */
	pgdat->pgdat_next = pgdat_list;
	pgdat_list = pgdat;

	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
	bdata->node_boot_start = (start << PAGE_SHIFT);
	bdata->node_low_pfn = end;

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
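/*
 * Example (hypothetical numbers): init_bootmem_core(pgdat, 0x90, 0, 0x8000)
 * places the bitmap at pfn 0x90 and covers pfns [0, 0x8000); the map
 * occupies (0x8000 + 7)/8 = 0x1000 bytes, all bits initially set (reserved).
 */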
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i;
	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long eidx = (addr + size - bdata->node_boot_start +
							PAGE_SIZE-1)/PAGE_SIZE;
	unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(sidx >= eidx);
	BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
		}
}
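/*
 * Example (hypothetical numbers, node_boot_start == 0, 4 KB pages):
 * reserve_bootmem_core(bdata, 0x1800, 0x1000) covers [0x1800, 0x2800)
 * and reserves pfns 1 and 2 - the partially covered page at each end
 * counts as fully reserved.
 */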
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i, start, sidx;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */
	unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long end = (addr + size)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(end > bdata->node_low_pfn);

	if (addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up the beginning of the address.
	 */
	start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
	sidx = start - (bdata->node_boot_start/PAGE_SIZE);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
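/*
 * Example (hypothetical numbers, the mirror image of reserve above):
 * free_bootmem_core(bdata, 0x1800, 0x2800) covers [0x1800, 0x4000)
 * but frees only pfns 2 and 3 - the partially covered page at the
 * start (pfn 1) stays reserved.
 */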
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx;
	void *ret;

	if (!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));	/* alignment must be a power of 2 */

	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	offset = 0;
	if (align &&
	    (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = (align - (bdata->node_boot_start & (align - 1UL)));
	offset >>= PAGE_SHIFT;

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && (goal >= bdata->node_boot_start) &&
	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
	preferred += offset;
	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;	/* scan step in pages, at least 1 */

restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

	if (preferred > offset) {
		/* nothing free above 'goal': rescan from the node's start */
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	/*
	 * Remember where this search succeeded; frees below this address
	 * pull last_success back down (see free_bootmem_core() above).
	 */
	bdata->last_success = start << PAGE_SHIFT;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = (bdata->last_offset+align-1) & ~(align-1);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE-offset;
		if (size < remaining_size) {
			/* the request fits in the tail of the previous page */
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset+size;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
			bdata->last_pos = start+areasize-1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start+areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
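/*
 * Merge behaviour, worked through (hypothetical sizes, 4 KB pages):
 * two consecutive __alloc_bootmem_core() calls for 100 bytes with
 * align < PAGE_SIZE share a single page. The second call sees
 * last_pos+1 == start, takes the merge branch above, and returns
 * offset 100 of the first call's page with areasize == 0, so no
 * additional page gets reserved.
 */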
static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];
		if (gofast && v == ~0UL) {
			/* all BITS_PER_LONG pages are free: release them
			 * as one higher-order block */
			int j;
			count += BITS_PER_LONG;
			__ClearPageReserved(page);
			set_page_count(page, 1);
			for (j = 1; j < BITS_PER_LONG; j++) {
				if (j + 16 < BITS_PER_LONG)
					prefetchw(page + j + 16);
				__ClearPageReserved(page + j);
			}
			__free_pages(page, ffs(BITS_PER_LONG)-1);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			/* only some pages are free: release them one by one */
			unsigned long m;
			for (m = 1; m && i < idx; m<<=1, page++, i++) {
				if (v & m) {
					count++;
					__ClearPageReserved(page);
					set_page_count(page, 1);
					__free_page(page);
				}
			}
		} else {
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		}
	}
	total += count;

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
		count++;
		__ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}
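/*
 * Typical call sequence during early boot (a sketch only, not lifted
 * from any particular architecture's setup code; names are illustrative):
 *
 *	bootmap_size = init_bootmem(map_pfn, max_pfn);
 *	free_bootmem(usable_start, usable_size);
 *	reserve_bootmem(bootmap_phys, bootmap_size);
 *	...
 *	totalram_pages += free_all_bootmem();
 */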
unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn)
{
	return(init_bootmem_core(pgdat, freepfn, startpfn, endpfn));
}

void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size);
}

void __init free_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node (pg_data_t *pgdat)
{
	return(free_all_bootmem_core(pgdat));
}
unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return(init_bootmem_core(NODE_DATA(0), start, 0, pages));
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
void __init reserve_bootmem (unsigned long addr, unsigned long size)
{
	reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem (unsigned long addr, unsigned long size)
{
	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}

unsigned long __init free_all_bootmem (void)
{
	return(free_all_bootmem_core(NODE_DATA(0)));
}
void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
{
	pg_data_t *pgdat = pgdat_list;
	void *ptr;

	for_each_pgdat(pgdat)
		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
						align, goal)))
			return(ptr);

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
	if (ptr)
		return(ptr);

	return __alloc_bootmem(size, align, goal);
}
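/*
 * Example caller (hypothetical): a node-local allocation that prefers
 * memory above the DMA zone and falls back to any node - this mirrors
 * the alloc_bootmem_node() convenience macro:
 *
 *	ptr = __alloc_bootmem_node(pgdat, size, SMP_CACHE_BYTES,
 *				   __pa(MAX_DMA_ADDRESS));
 */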