2 * Handle the memory map.
3 * The functions here do the job until bootmem takes over.
4 * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
6 #include <linux/config.h>
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/init.h>
10 #include <linux/bootmem.h>
11 #include <linux/ioport.h>
12 #include <linux/string.h>
15 #include <asm/proto.h>
16 #include <asm/bootsetup.h>
19 * PFN of last memory page.
/*
 * Memory-extent globals computed from the e820 map.
 * NOTE(review): this listing elides lines; the comment delimiters around
 * the fragment above are not visible here.
 */
/* Highest page frame number that is usable RAM (set from e820). */
21 unsigned long end_pfn;
/* Upper pfn limit requested by the user via mem= (defaults to MAXMEM). */
22 unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
/* Highest pfn covered by ANY e820 entry (RAM or not); see comment at
   original lines 47-49: the direct mapping extends this far. */
23 unsigned long end_pfn_map;
26 * Add a memory region to the kernel e820 map.
/*
 * Append one entry (start, size, type) to the global e820 map.
 * NOTE(review): lines are elided in this listing — the capacity check that
 * guards the printk below, the assignment of the slot index `x`
 * (presumably x = e820.nr_map), and the nr_map increment are not visible.
 */
28 void __init add_memory_region(unsigned long start, unsigned long size, int type)
/* Emitted when the map is full (elided check, presumably x >= E820MAX). */
33 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
37 e820.map[x].addr = start;
38 e820.map[x].size = size;
39 e820.map[x].type = type;
47 * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
48 * The direct mapping extends to end_pfn_map, so that we can directly access
49 * apertures, ACPI and other tables without having to play with fixmaps.
53 * Last pfn which the user wants to use.
/* Kernel text/data resources; presumably defined in the arch setup code
   and registered below in e820_reserve_resources() — TODO confirm. */
56 extern struct resource code_resource, data_resource;
58 /* Check for some hardcoded bad areas that early boot is not allowed to touch */
/*
 * If [*addrp, *addrp + size) overlaps a range that early boot must not use,
 * advance *addrp just past that range and (presumably) return nonzero so
 * the caller (find_e820_area) retries at the new address.
 * NOTE(review): the SMP-startup checks, the return statements and the
 * closing braces are elided from this listing.
 */
59 static inline int bad_addr(unsigned long *addrp, unsigned long size)
61 unsigned long addr = *addrp, last = addr + size;
63 /* various gunk below that needed for SMP startup */
69 /* direct mapping tables of the kernel */
/* Overlap with the early page tables built for the direct mapping:
   skip to the first byte after them. */
70 if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
71 *addrp = table_end << PAGE_SHIFT;
76 #ifdef CONFIG_BLK_DEV_INITRD
/* Skip over the initrd image the bootloader placed in memory. */
77 if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
78 addr < INITRD_START+INITRD_SIZE) {
79 *addrp = INITRD_START + INITRD_SIZE;
83 /* kernel code + 640k memory hole (later should not be needed, but
84 be paranoid for now) */
/* Anything from 640k up to the end of the kernel image is off limits;
   restart the search after _end. */
85 if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
86 *addrp = __pa_symbol(&_end);
89 /* XXX ramdisk image here? */
/*
 * Return whether any part of [start, end) is covered by an e820 entry of
 * the given type; type == 0 presumably matches any entry type.
 * NOTE(review): the loop body's skip statement and both return statements
 * are elided from this listing.
 */
93 int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
96 for (i = 0; i < e820.nr_map; i++) {
97 struct e820entry *ei = &e820.map[i];
/* Wrong type: skip this entry (skip statement elided). */
98 if (type && ei->type != type)
/* Entry lies entirely outside [start, end): no overlap. */
100 if (ei->addr >= end || ei->addr + ei->size < start)
108 * Find a free area in a specific range.
/*
 * Scan the E820_RAM entries for `size` bytes inside [start, end), stepping
 * past ranges rejected by bad_addr().
 * NOTE(review): the clamping of `addr` to `start`, the computation of
 * `last`, and the failure/success return paths are elided from this
 * listing.
 */
110 unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
113 for (i = 0; i < e820.nr_map; i++) {
114 struct e820entry *ei = &e820.map[i];
115 unsigned long addr = ei->addr, last;
/* Only usable RAM entries are candidates. */
116 if (ei->type != E820_RAM)
/* Candidate start (after elided clamping) is past this entry: skip it. */
120 if (addr > ei->addr + ei->size)
/* Bump addr past reserved sub-ranges while the request still fits. */
122 while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
/* Request runs off the end of the entry: try the next one. */
125 if (last > ei->addr + ei->size)
135 * Free bootmem based on the e820 table for a node.
/*
 * Hand every fully page-aligned chunk of this node's E820_RAM entries that
 * intersects [start, end) to the bootmem allocator.
 * NOTE(review): the clamping of addr/last to the [start, end) window is
 * elided from this listing.
 */
137 void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
140 for (i = 0; i < e820.nr_map; i++) {
141 struct e820entry *ei = &e820.map[i];
142 unsigned long last, addr;
/* Skip non-RAM entries and entries that miss the requested window
   (the end-of-window comparison is elided). */
144 if (ei->type != E820_RAM ||
145 ei->addr+ei->size <= start ||
/* Round inward so only pages fully contained in the entry are freed. */
149 addr = round_up(ei->addr, PAGE_SIZE);
153 last = round_down(ei->addr + ei->size, PAGE_SIZE);
/* Free only when at least one whole page survived the rounding. */
157 if (last > addr && last-addr >= PAGE_SIZE)
158 free_bootmem_node(pgdat, addr, last-addr);
163 * Find the highest page frame number we have available
/*
 * Compute the local end_pfn (highest page-aligned RAM pfn) and the global
 * end_pfn_map (highest pfn covered by ANY e820 entry), then clip both by
 * MAXMEM and the user's mem= limit.
 * NOTE(review): the loop's closing brace and the return statement are
 * elided; presumably the clipped end_pfn is returned — TODO confirm.
 */
165 unsigned long __init e820_end_of_ram(void)
/* Local accumulator; shadows the file-scope end_pfn global. */
168 unsigned long end_pfn = 0;
170 for (i = 0; i < e820.nr_map; i++) {
171 struct e820entry *ei = &e820.map[i];
172 unsigned long start, end;
/* Count only whole pages inside the entry. */
174 start = round_up(ei->addr, PAGE_SIZE);
175 end = round_down(ei->addr + ei->size, PAGE_SIZE);
/* RAM raises end_pfn; every entry type raises end_pfn_map. */
178 if (ei->type == E820_RAM) {
179 if (end > end_pfn<<PAGE_SHIFT)
180 end_pfn = end>>PAGE_SHIFT;
182 if (end > end_pfn_map<<PAGE_SHIFT)
183 end_pfn_map = end>>PAGE_SHIFT;
/* The map extent can never be below the RAM extent. */
187 if (end_pfn > end_pfn_map)
188 end_pfn_map = end_pfn;
/* The direct mapping cannot extend past MAXMEM. */
189 if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
190 end_pfn_map = MAXMEM>>PAGE_SHIFT;
/* Honour the user's mem= limit, then keep end_pfn <= end_pfn_map. */
191 if (end_pfn > end_user_pfn)
192 end_pfn = end_user_pfn;
193 if (end_pfn > end_pfn_map)
194 end_pfn = end_pfn_map;
200 * Mark e820 reserved areas as busy for the resource manager.
/*
 * Register every e820 entry that fits below 4GB with the iomem resource
 * tree, naming it by type; RAM entries additionally get the kernel
 * code/data resources nested inside them.
 * NOTE(review): the skip statement after the 4GB check is elided from this
 * listing.
 */
202 void __init e820_reserve_resources(void)
205 for (i = 0; i < e820.nr_map; i++) {
206 struct resource *res;
/* Entries ending above 4GB are skipped (skip statement elided). */
207 if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
/* Early-boot allocation; presumably panics rather than returning NULL,
   since the result is used unchecked — TODO confirm. */
209 res = alloc_bootmem_low(sizeof(struct resource));
210 switch (e820.map[i].type) {
211 case E820_RAM: res->name = "System RAM"; break;
212 case E820_ACPI: res->name = "ACPI Tables"; break;
213 case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
214 default: res->name = "reserved";
216 res->start = e820.map[i].addr;
/* Resource ranges are inclusive, hence the -1. */
217 res->end = res->start + e820.map[i].size - 1;
218 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
219 request_resource(&iomem_resource, res);
220 if (e820.map[i].type == E820_RAM) {
222 * We don't know which RAM region contains kernel data,
223 * so we try it repeatedly and let the resource manager
/* ...reject the attempts that don't fit (comment tail elided). */
226 request_resource(res, &code_resource);
227 request_resource(res, &data_resource);
/*
 * Print the whole e820 map to the kernel log; `who` identifies the source
 * of the map (e.g. "BIOS-e820").
 * NOTE(review): the case labels for reserved/ACPI entries and the break
 * statements between cases are elided from this listing.
 */
233 void __init e820_print_map(char *who)
237 for (i = 0; i < e820.nr_map; i++) {
/* Range is printed as [addr, addr+size), i.e. the end is exclusive. */
238 printk(" %s: %016Lx - %016Lx ", who,
239 (unsigned long long) e820.map[i].addr,
240 (unsigned long long) (e820.map[i].addr + e820.map[i].size),
241 switch (e820.map[i].type) {
242 case E820_RAM: printk("(usable)\n");
245 printk("(reserved)\n");
248 printk("(ACPI data)\n");
251 printk("(ACPI NVS)\n");
253 default: printk("type %u\n", e820.map[i].type);
260 * Sanitize the BIOS e820 map.
262 * Some e820 responses include overlapping entries. The following
263 * replaces the original e820 map with a new one, removing overlaps.
/*
 * Classic e820 sanitizer: decompose every entry into start/end "change
 * points", sort them, then sweep left-to-right rebuilding a non-overlapping
 * map where, for overlapping regions, the highest (most restrictive) type
 * wins. On success *pnr_map is updated to the new entry count.
 * NOTE(review): several declarations (i, old_nr, new_nr, overlap_entries,
 * new_bios_entry), the early-out return for <2 entries, and the final
 * return statements are elided from this listing.
 */
266 static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
268 struct change_member {
269 struct e820entry *pbios; /* pointer to original bios entry */
270 unsigned long long addr; /* address for this change point */
/* Each bios entry contributes two change points (start + end), hence
   the 2*E820MAX sizing; __initdata keeps these out of runtime memory. */
272 static struct change_member change_point_list[2*E820MAX] __initdata;
273 static struct change_member *change_point[2*E820MAX] __initdata;
274 static struct e820entry *overlap_list[E820MAX] __initdata;
275 static struct e820entry new_bios[E820MAX] __initdata;
276 struct change_member *change_tmp;
277 unsigned long current_type, last_type;
278 unsigned long long last_addr;
279 int chgidx, still_changing;
286 Visually we're performing the following (1,2,3,4 = memory types)...
288 Sample memory map (w/overlaps):
289 ____22__________________
290 ______________________4_
291 ____1111________________
292 _44_____________________
293 11111111________________
294 ____________________33__
295 ___________44___________
296 __________33333_________
297 ______________22________
298 ___________________2222_
299 _________111111111______
300 _____________________11_
301 _________________4______
303 Sanitized equivalent (no overlap):
304 1_______________________
305 _44_____________________
306 ___1____________________
307 ____22__________________
308 ______11________________
309 _________1______________
310 __________3_____________
311 ___________44___________
312 _____________33_________
313 _______________2________
314 ________________1_______
315 _________________4______
316 ___________________2____
317 ____________________33__
318 ______________________4_
321 /* if there's only one memory region, don't bother */
327 /* bail out if we find any unreasonable addresses in bios map */
/* addr + size wrapping past 2^64 means a corrupt entry. */
328 for (i=0; i<old_nr; i++)
329 if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
332 /* create pointers for initial change-point information (for sorting) */
333 for (i=0; i < 2*old_nr; i++)
334 change_point[i] = &change_point_list[i];
336 /* record all known change-points (starting and ending addresses) */
338 for (i=0; i < old_nr; i++) {
339 change_point[chgidx]->addr = biosmap[i].addr;
340 change_point[chgidx++]->pbios = &biosmap[i];
341 change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
342 change_point[chgidx++]->pbios = &biosmap[i];
345 /* sort change-point list by memory addresses (low -> high) */
/* Bubble sort; fine for at most 2*E820MAX boot-time elements. */
347 while (still_changing) {
349 for (i=1; i < 2*old_nr; i++) {
350 /* if <current_addr> > <last_addr>, swap */
351 /* or, if current=<start_addr> & last=<end_addr>, swap */
/* The tie-break ensures a start point sorts after an end point at the
   same address, so abutting regions don't register as overlapping. */
352 if ((change_point[i]->addr < change_point[i-1]->addr) ||
353 ((change_point[i]->addr == change_point[i-1]->addr) &&
354 (change_point[i]->addr == change_point[i]->pbios->addr) &&
355 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
358 change_tmp = change_point[i];
359 change_point[i] = change_point[i-1];
360 change_point[i-1] = change_tmp;
366 /* create a new bios memory map, removing overlaps */
367 overlap_entries=0; /* number of entries in the overlap table */
368 new_bios_entry=0; /* index for creating new bios map entries */
369 last_type = 0; /* start with undefined memory type */
370 last_addr = 0; /* start with 0 as last starting address */
371 /* loop through change-points, determining affect on the new bios map */
372 for (chgidx=0; chgidx < 2*old_nr; chgidx++)
374 /* keep track of all overlapping bios entries */
/* A change point equal to its entry's start address is an opening edge. */
375 if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
377 /* add map entry to overlap list (> 1 entry implies an overlap) */
378 overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
382 /* remove entry from list (order independent, so swap with last) */
383 for (i=0; i<overlap_entries; i++)
385 if (overlap_list[i] == change_point[chgidx]->pbios)
386 overlap_list[i] = overlap_list[overlap_entries-1];
390 /* if there are overlapping entries, decide which "type" to use */
391 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
393 for (i=0; i<overlap_entries; i++)
394 if (overlap_list[i]->type > current_type)
395 current_type = overlap_list[i]->type;
396 /* continue building up new bios map based on this information */
/* The effective type changed at this address: close the previous output
   entry (if any) and open a new one (if the new type isn't "hole"). */
397 if (current_type != last_type) {
398 if (last_type != 0) {
399 new_bios[new_bios_entry].size =
400 change_point[chgidx]->addr - last_addr;
401 /* move forward only if the new size was non-zero */
402 if (new_bios[new_bios_entry].size != 0)
403 if (++new_bios_entry >= E820MAX)
404 break; /* no more space left for new bios entries */
406 if (current_type != 0) {
407 new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
408 new_bios[new_bios_entry].type = current_type;
409 last_addr=change_point[chgidx]->addr;
411 last_type = current_type;
414 new_nr = new_bios_entry; /* retain count for new bios entries */
416 /* copy new bios mapping into original location */
417 memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
424 * Copy the BIOS e820 map into a safe place.
426 * Sanity-check it while we're at it..
428 * If we're lucky and live on a modern system, the setup code
429 * will have given us a memory map that we can use to properly
430 * set up memory. If we aren't, we'll fake a memory map.
432 * We check to see that the memory map contains at least 2 elements
433 * before we'll use it, because the detection code in setup.S may
434 * not be perfect and most every PC known to man has two memory
435 * regions: one from 0 to 640k, and one from 1mb up. (The IBM
436 * thinkpad 560x, for example, does not cooperate with the memory
/*
 * Validate the BIOS-supplied map entry by entry and feed it into the
 * kernel's e820 via add_memory_region(); presumably returns < 0 when the
 * map is unusable so the caller can fall back to a fabricated map.
 * NOTE(review): the <2-entry bail-out, the overflow check body, part of
 * the 640k-1M fixup, and the return statements are elided from this
 * listing.
 */
439 static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
441 /* Only one memory region (or negative)? Ignore it */
446 unsigned long start = biosmap->addr;
447 unsigned long size = biosmap->size;
448 unsigned long end = start + size;
449 unsigned long type = biosmap->type;
451 /* Overflow in 64 bits? Ignore the memory map. */
456 * Some BIOSes claim RAM in the 640k - 1M region.
457 * Not right. Fix it up.
459 * This should be removed on Hammer which is supposed to not
460 * have non e820 covered ISA mappings there, but I had some strange
461 * problems so it stays for now. -AK
/* RAM entries overlapping the 640k-1M ISA hole get the sub-640k part
   added separately; the hole itself is excluded (tail of fixup elided). */
463 if (type == E820_RAM) {
464 if (start < 0x100000ULL && end > 0xA0000ULL) {
465 if (start < 0xA0000ULL)
466 add_memory_region(start, 0xA0000ULL-start, type);
467 if (end <= 0x100000ULL)
474 add_memory_region(start, size, type);
/* Walk the bios map one entry per iteration. */
475 } while (biosmap++,--nr_map);
/*
 * Build the kernel's e820 map at boot: sanitize and copy the BIOS map,
 * or fall back to a fabricated two-region map (0-640k plus 1MB-up) sized
 * from the legacy EXT_MEM_K/ALT_MEM_K probes.
 * NOTE(review): the assignment of `who` in the fallback path and the
 * closing of the fallback block are elided from this listing.
 */
479 void __init setup_memory_region(void)
481 char *who = "BIOS-e820";
484 * Try to copy the BIOS-supplied E820-map.
486 * Otherwise fake a memory map; one section from 0k->640k,
487 * the next section from 1mb->appropriate_mem_k
489 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
/* Negative return means the BIOS map is unusable: fabricate one. */
490 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
491 unsigned long mem_size;
493 /* compare results from other methods and take the greater */
494 if (ALT_MEM_K < EXT_MEM_K) {
495 mem_size = EXT_MEM_K;
498 mem_size = ALT_MEM_K;
/* mem_size is in KB, hence the << 10 to get bytes. */
503 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
504 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
506 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
/* Xen: lets the hypervisor-specific code see the mem= limit as well. */
511 extern unsigned long xen_override_max_pfn;
/*
 * Parse the mem= boot option: cap usable memory at the given size.
 * NOTE(review): the function's opening brace and any trailing lines are
 * elided from this listing.
 */
513 void __init parse_memopt(char *p, char **from)
516 * mem=XXX[kKmM] limits kernel memory to XXX+1MB
518 * It would be more logical to count from 0 instead of from
519 * HIGH_MEMORY, but we keep that for now for i386 compatibility.
521 * No support for custom mapping like i386. The reason is
522 * that we need to read the e820 map anyways to handle the
523 * ACPI mappings in the direct map. Also on x86-64 there
524 * should be always a good e820 map. This is only an upper
525 * limit, you cannot force usage of memory not in e820.
/* Byte count (offset by HIGH_MEMORY for i386 compatibility), then
   converted to a pfn. */
529 end_user_pfn = memparse(p, from) + HIGH_MEMORY;
530 end_user_pfn >>= PAGE_SHIFT;
/* Propagate the same cap to the Xen max-pfn override. */
531 xen_override_max_pfn = (unsigned long) end_user_pfn;