/*
 * Copyright (C) 1999 Ingo Molnar
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 * Simple boot-time physical memory area allocator and
 * free memory collector. It's used to deal with reserved
 * system memory and memory holes as well.
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <asm/io.h>
#include "internal.h"
/*
 * Access to this subsystem has to be serialized externally (this is
 * true for the boot process anyway).
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
EXPORT_SYMBOL(min_low_pfn);
unsigned long max_pfn;
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
EXPORT_SYMBOL(max_pfn);		/* This is exported so
				 * dma_get_required_mask(), which uses
				 * it, can be an inline function */
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages (unsigned long pages)
{
	unsigned long mapsize;

	/* one bit per page, rounded up to whole bytes, then to whole pages */
	mapsize = (pages+7)/8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
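/*
 * Worked example (illustrative, assuming 4 KiB pages): for a node with
 * 262144 pages (1 GiB), the bitmap needs 32768 bytes, which rounds up
 * to 8 bitmap pages.
 */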
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize = ((end - start)+7)/8;

	pgdat->pgdat_next = pgdat_list;
	pgdat_list = pgdat;

	/* round the bitmap size up to a multiple of sizeof(long) */
	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
	bdata->node_boot_start = (start << PAGE_SHIFT);
	bdata->node_low_pfn = end;

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
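/*
 * A minimal sketch of the expected call sequence from architecture
 * setup code (the pfn and size values are hypothetical):
 *
 *	init_bootmem(bitmap_pfn, max_pfn);
 *	free_bootmem(usable_start, usable_size);
 *	reserve_bootmem(kernel_start, kernel_size);
 */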
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i;
	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long eidx = (addr + size - bdata->node_boot_start +
							PAGE_SIZE-1)/PAGE_SIZE;
	unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(sidx >= eidx);
	BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
		}
}
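/*
 * Example of the round-up semantics (illustrative, 4 KiB pages,
 * node_boot_start == 0): reserving addr = 0x1800, size = 0x1000 gives
 * sidx = 1 and eidx = 3, so both partially covered pages are marked
 * fully reserved.
 */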
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i, start, sidx;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */
	unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long end = (addr + size)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(end > bdata->node_low_pfn);

	if (addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up the beginning of the address.
	 */
	start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
	sidx = start - (bdata->node_boot_start/PAGE_SIZE);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
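/*
 * Note the asymmetry with reserve_bootmem_core(): freeing rounds the
 * start up and the end down. Illustrative example (4 KiB pages,
 * node_boot_start == 0): freeing addr = 0x1800, size = 0x2000 clears
 * only page 2; the partial pages at either end stay reserved.
 */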
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx;
	void *ret;

	if (!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	offset = 0;
	if (align &&
	    (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = (align - (bdata->node_boot_start & (align - 1UL)));
	offset >>= PAGE_SHIFT;
	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && (goal >= bdata->node_boot_start) &&
	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
	preferred += offset;
	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;	/* scan step in pages, at least 1 */
restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit (j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

	if (preferred > offset) {
		/* nothing free above 'goal': rescan from the bottom */
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = start << PAGE_SHIFT;
	BUG_ON(start >= eidx);
	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = (bdata->last_offset+align-1) & ~(align-1);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE-offset;
		if (size < remaining_size) {
			/* the new allocation fits in the leftover page */
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset+size;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
			bdata->last_pos = start+areasize-1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}
	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start+areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
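/*
 * Merge example (illustrative, 4 KiB pages): two back-to-back
 * allocations of 100 bytes with align == 16 share one page - the
 * second lands at offset 112 (100 rounded up to a multiple of 16)
 * in the same page instead of consuming a fresh one.
 */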
static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];
		if (gofast && v == ~0UL) {
			int j, order;

			/* whole word free: release it as one high-order block */
			count += BITS_PER_LONG;
			__ClearPageReserved(page);
			order = ffs(BITS_PER_LONG) - 1;
			set_page_refs(page, order);
			for (j = 1; j < BITS_PER_LONG; j++) {
				if (j + 16 < BITS_PER_LONG)
					prefetchw(page + j + 16);
				__ClearPageReserved(page + j);
			}
			__free_pages(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			/* mixed word: free the free pages one by one */
			unsigned long m;
			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
				if (v & m) {
					count++;
					__ClearPageReserved(page);
					set_page_refs(page, 0);
					__free_page(page);
				}
			}
		} else {
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		}
	}
	total += count;
	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
		count++;
		__ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}
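/*
 * Illustration of the fast path above: on a 64-bit box a fully free
 * bitmap word covers 64 pages, and ffs(64) - 1 == 6, so the run is
 * handed to the buddy allocator as one order-6 block (256 KiB with
 * 4 KiB pages) instead of 64 single pages.
 */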
unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn)
{
	return(init_bootmem_core(pgdat, freepfn, startpfn, endpfn));
}

void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size);
}

void __init free_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node (pg_data_t *pgdat)
{
	return(free_all_bootmem_core(pgdat));
}

unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return(init_bootmem_core(NODE_DATA(0), start, 0, pages));
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
void __init reserve_bootmem (unsigned long addr, unsigned long size)
{
	reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem (unsigned long addr, unsigned long size)
{
	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
}

unsigned long __init free_all_bootmem (void)
{
	return(free_all_bootmem_core(NODE_DATA(0)));
}
void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
{
	pg_data_t *pgdat = pgdat_list;
	void *ptr;

	for_each_pgdat(pgdat)
		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
						align, goal)))
			return(ptr);

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
	if (ptr)
		return (ptr);

	/* fall back to any node that can satisfy the request */
	return __alloc_bootmem(size, align, goal);
}
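/*
 * Typical use, as a rough sketch (the size and the caller are
 * hypothetical): early code that needs a permanent table before the
 * buddy allocator is up would do
 *
 *	table = __alloc_bootmem(table_size, SMP_CACHE_BYTES, 0);
 *
 * Once mem_init() has called free_all_bootmem(), all surviving boot
 * memory belongs to the page allocator and this API must not be used
 * any more.
 */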