/*
 * Copyright (C) 1999 Ingo Molnar
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *
 * Simple boot-time physical memory area allocator and
 * free memory collector. It's used to deal with reserved
 * system memory and memory holes as well.
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <asm/io.h>
/*
 * Access to this subsystem has to be serialized externally. (this is
 * true for the boot process anyway)
 */
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

EXPORT_SYMBOL(max_pfn);		/* This is exported so
				 * dma_get_required_mask(), which uses
				 * it, can be an inline function */
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages+7)/8;				/* one bit per page, in bytes */
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;	/* round up to whole pages */
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
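/*
 * Worked example (illustration only, assuming PAGE_SIZE == 4096):
 * a 128 MiB node has 32768 page frames; one bit per page gives
 * 32768/8 == 4096 bytes of bitmap, so bootmem_bootmap_pages(32768)
 * rounds that up to exactly 1 page.
 */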
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long mapsize = ((end - start)+7)/8;

	pgdat->pgdat_next = pgdat_list;
	pgdat_list = pgdat;

	mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
	bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
	bdata->node_boot_start = (start << PAGE_SHIFT);
	bdata->node_low_pfn = end;

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
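/*
 * Note: 'mapstart', 'start' and 'end' above are page frame numbers,
 * not physical addresses - hence the << PAGE_SHIFT conversions.
 * node_boot_start is kept in bytes while node_low_pfn is a pfn, and
 * the returned mapsize is in bytes so that callers can later
 * reserve_bootmem() the bitmap itself.
 */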
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i;
	/*
	 * round up, partially reserved pages are considered
	 * fully reserved.
	 */
	unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long eidx = (addr + size - bdata->node_boot_start +
							PAGE_SIZE-1)/PAGE_SIZE;
	unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(sidx >= eidx);
	BUG_ON((addr >> PAGE_SHIFT) >= bdata->node_low_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
		}
}
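/*
 * Rounding illustration (hypothetical values, PAGE_SIZE == 4096,
 * node_boot_start == 0): reserve_bootmem_core(bdata, 0x1800, 0x1000)
 * covers bytes 0x1800-0x27ff, so sidx == 1 and eidx == 3: pages 1 and 2
 * are both marked, because a partially covered page must be treated
 * as fully reserved.
 */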
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
	unsigned long i, start, sidx;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */
	unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
	unsigned long end = (addr + size)/PAGE_SIZE;

	BUG_ON(!size);
	BUG_ON(end > bdata->node_low_pfn);

	if (addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round up the beginning of the address.
	 */
	start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
	sidx = start - (bdata->node_boot_start/PAGE_SIZE);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
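/*
 * Conversely (same hypothetical values): free_bootmem_core(bdata,
 * 0x1800, 0x2000) clears only page 2 - the start is rounded up to
 * 0x2000 and the end rounded down to 0x3000, so the two partially
 * covered pages at either end stay reserved.
 */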
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal)
{
	unsigned long offset, remaining_size, areasize, preferred;
	unsigned long i, start = 0, incr, eidx;
	void *ret;

	if (!size) {
		printk("__alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	offset = 0;
	if (align &&
	    (bdata->node_boot_start & (align - 1UL)) != 0)
		offset = (align - (bdata->node_boot_start & (align - 1UL)));
	offset >>= PAGE_SHIFT;

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	if (goal && (goal >= bdata->node_boot_start) &&
	    ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
		preferred = goal - bdata->node_boot_start;

		if (bdata->last_success >= preferred)
			preferred = bdata->last_success;
	} else
		preferred = 0;

	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
	preferred += offset;
	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;	/* scan step in pages, at least 1 */

restart_scan:
	for (i = preferred; i < eidx; i += incr) {
		unsigned long j;
		i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (test_bit(i, bdata->node_bootmem_map))
			continue;
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, bdata->node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
	}

	if (preferred > offset) {
		preferred = offset;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = start << PAGE_SHIFT;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		offset = (bdata->last_offset+align-1) & ~(align-1);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE-offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset+size;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size+PAGE_SIZE-1)/PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos*PAGE_SIZE + offset +
						bdata->node_boot_start);
			bdata->last_pos = start+areasize-1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start+areasize; i++)
		if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
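/*
 * Merge illustration (hypothetical sizes, PAGE_SIZE == 4096): suppose
 * the previous allocation ended 100 bytes into page P (last_pos == P,
 * last_offset == 100) and the scan above places the next request of
 * 50 bytes with align == 4 at start == P+1. The merge branch then
 * rounds last_offset up to 100 (already 4-byte aligned) and returns
 * the tail of page P instead, so both allocations share one page.
 */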
static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
{
	struct page *page;
	bootmem_data_t *bdata = pgdat->bdata;
	unsigned long i, count, total = 0;
	unsigned long idx, *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	page = virt_to_page(phys_to_virt(bdata->node_boot_start));
	idx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
	map = bdata->node_bootmem_map;
	/* Check physaddr is O(LOG2(BITS_PER_LONG)) page aligned */
	if (bdata->node_boot_start == 0 ||
	    ffs(bdata->node_boot_start) - PAGE_SHIFT > ffs(BITS_PER_LONG))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];
		if (gofast && v == ~0UL) {
			int j;
			/* whole word free: release as one high-order block */
			count += BITS_PER_LONG;
			__ClearPageReserved(page);
			set_page_count(page, 1);
			for (j = 1; j < BITS_PER_LONG; j++) {
				if (j + 16 < BITS_PER_LONG)
					prefetchw(page + j + 16);
				__ClearPageReserved(page + j);
			}
			__free_pages(page, ffs(BITS_PER_LONG)-1);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			/* mixed word: free the pages bit by bit */
			unsigned long m;
			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
				if (v & m) {
					count++;
					__ClearPageReserved(page);
					set_page_count(page, 1);
					__free_page(page);
				}
			}
		} else {
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		}
	}
	total += count;

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	count = 0;
	for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++, page++) {
		count++;
		__ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}
	total += count;
	bdata->node_bootmem_map = NULL;

	return total;
}
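/*
 * Note on the fast path above: when gofast is set (the node base is
 * sufficiently aligned, per the ffs() check) and an entire bitmap word
 * is zero, all BITS_PER_LONG pages are free and are handed to the buddy
 * allocator as a single high-order block. E.g. with BITS_PER_LONG == 32,
 * ffs(32)-1 == 5, so one order-5 (32-page) __free_pages() call replaces
 * 32 separate order-0 frees.
 */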
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat, freepfn, startpfn, endpfn);
}

void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	reserve_bootmem_core(pgdat->bdata, physaddr, size);
}

void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	return free_all_bootmem_core(pgdat);
}
#ifndef CONFIG_DISCONTIGMEM
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(&contig_page_data, start, 0, pages);
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
void __init reserve_bootmem(unsigned long addr, unsigned long size)
{
	reserve_bootmem_core(contig_page_data.bdata, addr, size);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

void __init free_bootmem(unsigned long addr, unsigned long size)
{
	free_bootmem_core(contig_page_data.bdata, addr, size);
}

unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(&contig_page_data);
}
#endif /* !CONFIG_DISCONTIGMEM */
void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
{
	pg_data_t *pgdat = pgdat_list;
	void *ptr;

	for_each_pgdat(pgdat)
		if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
						align, goal)))
			return ptr;

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
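/*
 * Typical boot-time call sequence, as a hedged sketch only - the real
 * sequence lives in architecture setup code and varies by platform;
 * 'bitmap_pfn', 'ram_start' and 'ram_size' below are hypothetical:
 *
 *	mapsize = init_bootmem(bitmap_pfn, max_low_pfn);    // all reserved
 *	free_bootmem(ram_start, ram_size);		    // register usable RAM
 *	reserve_bootmem(bitmap_pfn << PAGE_SHIFT, mapsize); // protect the bitmap
 *	ptr = __alloc_bootmem(size, SMP_CACHE_BYTES, 0);    // early allocation
 *	totalram_pages += free_all_bootmem();		    // hand over to buddy
 */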