4 * Loader for Multiboot-compliant kernels and modules.
6 * Copyright (C) 2005 Tim Deegan <Tim.Deegan@cl.cam.ac.uk>
7 * Parts based on GNU GRUB, Copyright (C) 2000 Free Software Foundation, Inc.
8 * Parts based on SYSLINUX, Copyright (C) 1994-2005 H. Peter Anvin.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
39 #include "mb_header.h"
41 #include <klibc/compiler.h> /* For __constructor */
43 #define MIN(_x, _y) (((_x)<(_y))?(_x):(_y))
44 #define MAX(_x, _y) (((_x)>(_y))?(_x):(_y))
46 /* Define this for some more printout */
49 /* Memory magic numbers */
50 #define STACK_SIZE 0x20000 /* XXX Could be much smaller */
51 #define MALLOC_SIZE 0x100000 /* XXX Could be much smaller */
52 #define MIN_RUN_ADDR 0x10000 /* Lowest address we'll consider using */
53 #define MEM_HOLE_START 0xa0000 /* Memory hole runs from 640k ... */
54 #define MEM_HOLE_END 0x100000 /* ... to 1MB */
55 #define X86_PAGE_SIZE 0x1000
57 size_t __stack_size = STACK_SIZE; /* How much stack we'll use */
58 extern void *__mem_end; /* Start of malloc() heap */
59 extern char _end[]; /* End of static data */
61 /* Pointer to free memory for loading into: load area is between here
63 static char *next_load_addr;
65 /* Memory map for run-time */
66 typedef struct section section_t;
68 size_t dest; /* Start of run-time allocation */
69 char *src; /* Current location of data for memmove(),
70 * or NULL for bzero() */
71 size_t size; /* Length of allocation */
73 static char *section_addr;
74 static int section_count;
76 static size_t max_run_addr; /* Highest address we'll consider using */
77 static size_t next_mod_run_addr; /* Where the next module will be put */
79 /* File loads are in units of this much */
80 #define LOAD_CHUNK 0x20000
82 /* Layout of the input to the 32-bit lidt instruction */
84 unsigned int limit:16;
86 } __attribute__((packed));
89 static const char version_string[] = "COM32 Multiboot loader v0.1";
90 static const char copyright_string[] = "Copyright (C) 2005 Tim Deegan.";
91 static const char module_separator[] = "---";
95 * Start of day magic, run from __start during library init.
98 static void __constructor check_version(void)
99 /* Check the SYSLINUX version. Docs say we should be OK from v2.08,
100 * but in fact we crash on anything below v2.12 (when libc came in). */
102 com32sys_t regs_in, regs_out;
103 const char *p, *too_old = "Fatal: SYSLINUX image is too old; "
104 "mboot.c32 needs at least version 2.12.\r\n";
106 memset(®s_in, 0, sizeof(regs_in));
107 regs_in.eax.l = 0x0001; /* "Get version" */
108 __intcall(0x22, ®s_in, ®s_out);
109 if (regs_out.ecx.w[0] >= 0x020c) return;
111 /* Pointless: on older versions this print fails too. :( */
112 for (p = too_old ; *p ; p++) {
113 memset(®s_in, 0, sizeof(regs_in));
114 regs_in.eax.b[1] = 0x02; /* "Write character" */
115 regs_in.edx.b[0] = *p;
116 __intcall(0x21, ®s_in, ®s_out);
119 __intcall(0x20, ®s_in, ®s_out); /* "Terminate program" */
123 static void __constructor grab_memory(void)
124 /* Runs before init_memory_arena() (com32/lib/malloc.c) to let
125 * the malloc() code know how much space it's allowed to use.
126 * We don't use malloc() directly, but some of the library code
127 * does (zlib, for example). */
129 /* Find the stack pointer */
131 asm volatile("movl %%esp, %0" : "=r" (sp));
133 /* Initialize the allocation of *run-time* memory: don't let ourselves
134 * overwrite the stack during the relocation later. */
135 max_run_addr = (size_t) sp - (MALLOC_SIZE + STACK_SIZE);
137 /* Move the end-of-memory marker: malloc() will use only memory
138 * above __mem_end and below the stack. We will load files starting
139 * at the old __mem_end and working towards the new one, and allocate
140 * section descriptors at the top of that area, working down. */
141 next_load_addr = __mem_end;
142 section_addr = sp - (MALLOC_SIZE + STACK_SIZE);
145 /* But be careful not to move it the wrong direction if memory is
146 * tight. Instead we'll fail more gracefully later, when we try to
147 * load a file and find that next_load_addr > section_addr. */
148 __mem_end = MAX(section_addr, next_load_addr);
155 * Run-time memory map functions: allocating and recording allocations.
158 static int cmp_sections(const void *a, const void *b)
159 /* For sorting section descriptors by destination address */
161 const section_t *sa = a;
162 const section_t *sb = b;
163 if (sa->dest < sb->dest) return -1;
164 if (sa->dest > sb->dest) return 1;
169 static void add_section(size_t dest, char *src, size_t size)
170 /* Adds something to the list of sections to relocate. */
175 printf("SECTION: %#8.8x --> %#8.8x (%#x)\n", (size_t) src, dest, size);
178 section_addr -= sizeof (section_t);
179 if (section_addr < next_load_addr) {
180 printf("Fatal: out of memory allocating section descriptor.\n");
183 sec = (section_t *) section_addr;
190 /* Keep the list sorted */
191 qsort(sec, section_count, sizeof (section_t), cmp_sections);
195 static size_t place_low_section(size_t size, size_t align)
196 /* Find a space in the run-time memory map, below 640K */
200 section_t *sections = (section_t *) section_addr;
202 start = MIN_RUN_ADDR;
203 start = (start + (align-1)) & ~(align-1);
205 /* Section list is sorted by destination, so can do this in one pass */
206 for (i = 0; i < section_count; i++) {
207 if (sections[i].dest < start + size) {
208 /* Hit the bottom of this section */
209 start = sections[i].dest + sections[i].size;
210 start = (start + (align-1)) & ~(align-1);
213 if (start + size < MEM_HOLE_START) return start;
218 static size_t place_module_section(size_t size, size_t align)
219 /* Find a space in the run-time memory map for this module. */
221 /* Ideally we'd run through the sections looking for a free space
222 * like place_low_section() does, but some OSes (Xen, at least)
223 * assume that the bootloader has loaded all the modules
224 * consecutively, above the kernel. So, what we actually do is
225 * keep a pointer to the highest address allocated so far, and
226 * always allocate modules there. */
228 size_t start = next_mod_run_addr;
229 start = (start + (align-1)) & ~(align-1);
231 if (start + size > max_run_addr) return 0;
233 next_mod_run_addr = start + size;
238 static void place_kernel_section(size_t start, size_t size)
239 /* Allocate run-time space for part of the kernel, checking for
240 * sanity. We assume the kernel isn't broken enough to have
241 * overlapping segments. */
243 /* We always place modules above the kernel */
244 next_mod_run_addr = MAX(next_mod_run_addr, start + size);
246 if (start > max_run_addr || start + size > max_run_addr) {
247 /* Overruns the end of memory */
248 printf("Fatal: kernel loads too high (%#8.8x+%#x > %#8.8x).\n",
249 start, size, max_run_addr);
252 if (start >= MEM_HOLE_END) {
253 /* Above the memory hole: easy */
255 printf("Placed kernel section (%#8.8x+%#x)\n", start, size);
259 if (start >= MEM_HOLE_START) {
260 /* In the memory hole. Not so good */
261 printf("Fatal: kernel load address (%#8.8x) is in the memory hole.\n",
265 if (start + size > MEM_HOLE_START) {
266 /* Too big for low memory */
267 printf("Fatal: kernel (%#8.8x+%#x) runs into the memory hole.\n",
271 if (start < MIN_RUN_ADDR) {
273 printf("Fatal: kernel load address (%#8.8x) is too low (<%#8.8x).\n",
274 start, MIN_RUN_ADDR);
277 /* Kernel loads below the memory hole: OK */
279 printf("Placed kernel section (%#8.8x+%#x)\n", start, size);
284 static void reorder_sections(void)
285 /* Reorders sections into a safe order, where no relocation
286 * overwrites the source of a later one. */
288 section_t *secs = (section_t *) section_addr;
293 printf("Relocations:\n");
294 for (i = 0; i < section_count ; i++) {
295 printf(" %#8.8x --> %#8.8x (%#x)\n",
296 (size_t)secs[i].src, secs[i].dest, secs[i].size);
300 for (i = 0; i < section_count; i++) {
303 for (j = i + 1 ; j < section_count; j++) {
304 if (secs[j].src != NULL
305 && secs[i].dest + secs[i].size > (size_t) secs[j].src
306 && secs[i].dest < (size_t) secs[j].src + secs[j].size) {
307 /* Would overwrite the source of the later move */
308 if (++tries > section_count) {
310 /* XXX Try to break deadlocks? */
311 printf("Fatal: circular dependence in relocations.\n");
314 /* Swap these sections (using struct copies) */
315 tmp = secs[i]; secs[i] = secs[j]; secs[j] = tmp;
316 /* Start scanning again from the new secs[i]... */
323 printf("Relocations:\n");
324 for (i = 0; i < section_count ; i++) {
325 printf(" %#8.8x --> %#8.8x (%#x)\n",
326 (size_t)secs[i].src, secs[i].dest, secs[i].size);
332 static void init_mmap(struct multiboot_info *mbi)
333 /* Get a full memory map from the BIOS to pass to the kernel. */
335 com32sys_t regs_in, regs_out;
336 struct AddrRangeDesc *e820;
338 size_t mem_lower, mem_upper, run_addr, mmap_size;
341 /* Default values for mem_lower and mem_upper in case the BIOS won't
342 * tell us: 640K, and all memory up to the stack. */
343 asm volatile("movl %%esp, %0" : "=r" (sp));
344 mem_upper = (sp - MEM_HOLE_END) / 1024;
345 mem_lower = (MEM_HOLE_START) / 1024;
348 printf("Requesting memory map from BIOS:\n");
351 /* Ask the BIOS for the full memory map of the machine. We'll
352 * build it in Multiboot format (i.e. with size fields) in the
353 * bounce buffer, and then allocate some high memory to keep it in
354 * until boot time. */
355 e820 = __com32.cs_bounce;
359 while(((void *)(e820 + 1)) < __com32.cs_bounce + __com32.cs_bounce_size)
362 e820->size = sizeof(*e820) - sizeof(e820->size);
364 /* Ask the BIOS to fill in this descriptor */
365 regs_in.eax.l = 0xe820; /* "Get system memory map" */
366 regs_in.ebx.l = regs_out.ebx.l; /* Continuation value from last call */
367 regs_in.ecx.l = 20; /* Size of buffer to write into */
368 regs_in.edx.l = 0x534d4150; /* "SMAP" */
369 regs_in.es = SEG(&e820->BaseAddr);
370 regs_in.edi.w[0] = OFFS(&e820->BaseAddr);
371 __intcall(0x15, ®s_in, ®s_out);
373 if ((regs_out.eflags.l & EFLAGS_CF) != 0 && regs_out.ebx.l != 0)
374 break; /* End of map */
376 if (((regs_out.eflags.l & EFLAGS_CF) != 0 && regs_out.ebx.l == 0)
377 || (regs_out.eax.l != 0x534d4150))
380 printf("Error %x reading E820 memory map: %s.\n",
381 (int) regs_out.eax.b[0],
382 (regs_out.eax.b[0] == 0x80) ? "invalid command" :
383 (regs_out.eax.b[0] == 0x86) ? "not supported" :
390 printf(" %#16.16Lx -- %#16.16Lx : ",
391 e820->BaseAddr, e820->BaseAddr + e820->Length);
392 switch (e820->Type) {
393 case 1: printf("Available\n"); break;
394 case 2: printf("Reserved\n"); break;
395 case 3: printf("ACPI Reclaim\n"); break;
396 case 4: printf("ACPI NVS\n"); break;
397 default: printf("? (Reserved)\n"); break;
401 if (e820->Type == 1) {
402 if (e820->BaseAddr == 0) {
403 mem_lower = MIN(MEM_HOLE_START, e820->Length) / 1024;
404 } else if (e820->BaseAddr == MEM_HOLE_END) {
405 mem_upper = MIN(0xfff00000, e820->Length) / 1024;
409 /* Move to next slot */
414 if (regs_out.ebx.l == 0)
418 /* Record the simple information in the MBI */
419 mbi->flags |= MB_INFO_MEMORY;
420 mbi->mem_lower = mem_lower;
421 mbi->mem_upper = mem_upper;
423 /* Record the full memory map in the MBI */
424 if (e820_slots != 0) {
425 mmap_size = e820_slots * sizeof(*e820);
426 /* Where will it live at run time? */
427 run_addr = place_low_section(mmap_size, 1);
429 printf("Fatal: can't find space for the e820 mmap.\n");
432 /* Where will it live now? */
433 e820 = (struct AddrRangeDesc *) next_load_addr;
434 if (next_load_addr + mmap_size > section_addr) {
435 printf("Fatal: out of memory storing the e820 mmap.\n");
438 next_load_addr += mmap_size;
439 /* Copy it out of the bounce buffer */
440 memcpy(e820, __com32.cs_bounce, mmap_size);
441 /* Remember to copy it again at run time */
442 add_section(run_addr, (char *) e820, mmap_size);
443 /* Record it in the MBI */
444 mbi->flags |= MB_INFO_MEM_MAP;
445 mbi->mmap_length = mmap_size;
446 mbi->mmap_addr = run_addr;
454 * Code for loading and parsing files.
457 static void load_file(char *filename, char **startp, size_t *sizep)
458 /* Load a file into memory. Returns where it is and how big via
459 * startp and sizep */
465 printf("Loading %s.", filename);
467 start = next_load_addr;
472 if ((fp = gzopen(filename, "r")) == NULL) {
473 printf("\nFatal: cannot open %s\n", filename);
477 while (next_load_addr + LOAD_CHUNK <= section_addr) {
478 bsize = gzread(fp, next_load_addr, LOAD_CHUNK);
482 printf("\nFatal: read error in %s\n", filename);
487 next_load_addr += bsize;
490 if (bsize < LOAD_CHUNK) {
497 /* Running out of memory. Try and use up the last bit */
498 if (section_addr > next_load_addr) {
499 bsize = gzread(fp, next_load_addr, section_addr - next_load_addr);
507 printf("\nFatal: read error in %s\n", filename);
511 next_load_addr += bsize;
516 printf("\nFatal: out of memory reading %s\n", filename);
526 static size_t load_kernel(char *cmdline)
527 /* Load a multiboot/elf32 kernel and allocate run-time memory for it.
528 * Returns the kernel's entry address. */
531 char *load_addr; /* Where the image was loaded */
532 size_t load_size; /* How big it is */
533 char *seg_addr; /* Where a segment was loaded */
534 size_t seg_size, bss_size; /* How big it is */
535 size_t run_addr, run_size; /* Where it should be put */
539 struct multiboot_header *mbh;
541 printf("Kernel: %s\n", cmdline);
545 p = strchr(cmdline, ' ');
546 if (p != NULL) *p = 0;
547 load_file(cmdline, &load_addr, &load_size);
548 if (load_size < 12) {
549 printf("Fatal: %s is too short to be a multiboot kernel.",
553 if (p != NULL) *p = ' ';
556 /* Look for a multiboot header in the first 8k of the file */
557 for (i = 0; i <= MIN(load_size - 12, MULTIBOOT_SEARCH - 12); i += 4)
559 mbh = (struct multiboot_header *)(load_addr + i);
560 if (mbh->magic != MULTIBOOT_MAGIC
561 || ((mbh->magic+mbh->flags+mbh->checksum) & 0xffffffff))
563 /* Not a multiboot header */
566 if (mbh->flags & (MULTIBOOT_UNSUPPORTED | MULTIBOOT_VIDEO_MODE)) {
567 /* Requires options we don't support */
568 printf("Fatal: Kernel requires multiboot options "
569 "that I don't support: %#x.\n",
570 mbh->flags & (MULTIBOOT_UNSUPPORTED|MULTIBOOT_VIDEO_MODE));
574 /* This kernel will do: figure out where all the pieces will live */
576 if (mbh->flags & MULTIBOOT_AOUT_KLUDGE) {
578 /* Use the offsets in the multiboot header */
580 printf("Using multiboot header.\n");
583 /* Where is the code in the loaded file? */
584 seg_addr = ((char *)mbh) - (mbh->header_addr - mbh->load_addr);
586 /* How much code is there? */
587 run_addr = mbh->load_addr;
588 if (mbh->load_end_addr != 0)
589 seg_size = mbh->load_end_addr - mbh->load_addr;
591 seg_size = load_size - (seg_addr - load_addr);
593 /* How much memory will it take up? */
594 if (mbh->bss_end_addr != 0)
595 run_size = mbh->bss_end_addr - mbh->load_addr;
599 if (seg_size > run_size) {
600 printf("Fatal: can't put %i bytes of kernel into %i bytes "
601 "of memory.\n", seg_size, run_size);
604 if (seg_addr + seg_size > load_addr + load_size) {
605 printf("Fatal: multiboot load segment runs off the "
606 "end of the file.\n");
610 /* Does it fit where it wants to be? */
611 place_kernel_section(run_addr, run_size);
613 /* Put it on the relocation list */
614 if (seg_size < run_size) {
615 /* Set up the kernel BSS too */
617 add_section(run_addr, seg_addr, seg_size);
618 bss_size = run_size - seg_size;
619 add_section(run_addr + seg_size, NULL, bss_size);
622 add_section(run_addr, seg_addr, run_size);
626 return mbh->entry_addr;
630 /* Now look for an ELF32 header */
631 ehdr = (Elf32_Ehdr *)load_addr;
632 if (*(unsigned long *)ehdr != 0x464c457f
633 || ehdr->e_ident[EI_DATA] != ELFDATA2LSB
634 || ehdr->e_ident[EI_CLASS] != ELFCLASS32
635 || ehdr->e_machine != EM_386)
637 printf("Fatal: kernel has neither ELF32/x86 nor multiboot load"
641 if (ehdr->e_phoff + ehdr->e_phnum*sizeof (*phdr) > load_size) {
642 printf("Fatal: malformed ELF header overruns EOF.\n");
645 if (ehdr->e_phnum <= 0) {
646 printf("Fatal: ELF kernel has no program headers.\n");
651 printf("Using ELF header.\n");
654 if (ehdr->e_type != ET_EXEC
655 || ehdr->e_version != EV_CURRENT
656 || ehdr->e_phentsize != sizeof (Elf32_Phdr)) {
657 printf("Warning: funny-looking ELF header.\n");
659 phdr = (Elf32_Phdr *)(load_addr + ehdr->e_phoff);
661 /* Obey the program headers to load the kernel */
662 for(i = 0; i < ehdr->e_phnum; i++) {
664 /* How much is in this segment? */
665 run_size = phdr[i].p_memsz;
666 if (phdr[i].p_type != PT_LOAD)
669 seg_size = (size_t)phdr[i].p_filesz;
671 /* Where is it in the loaded file? */
672 seg_addr = load_addr + phdr[i].p_offset;
673 if (seg_addr + seg_size > load_addr + load_size) {
674 printf("Fatal: ELF load segment runs off the "
675 "end of the file.\n");
679 /* Skip segments that don't take up any memory */
680 if (run_size == 0) continue;
682 /* Place the segment where it wants to be */
683 run_addr = phdr[i].p_paddr;
684 place_kernel_section(run_addr, run_size);
686 /* Put it on the relocation list */
687 if (seg_size < run_size) {
688 /* Set up the kernel BSS too */
690 add_section(run_addr, seg_addr, seg_size);
691 bss_size = run_size - seg_size;
692 add_section(run_addr + seg_size, NULL, bss_size);
695 add_section(run_addr, seg_addr, run_size);
700 return ehdr->e_entry;
704 /* This is not a multiboot kernel */
705 printf("Fatal: not a multiboot kernel.\n");
711 static void load_module(struct mod_list *mod, char *cmdline)
712 /* Load a multiboot module and allocate a memory area for it */
715 size_t load_size, run_addr;
717 printf("Module: %s\n", cmdline);
721 p = strchr(cmdline, ' ');
722 if (p != NULL) *p = 0;
723 load_file(cmdline, &load_addr, &load_size);
724 if (p != NULL) *p = ' ';
726 /* Decide where it's going to live */
727 run_addr = place_module_section(load_size, X86_PAGE_SIZE);
729 printf("Fatal: can't find space for this module.\n");
732 add_section(run_addr, load_addr, load_size);
734 /* Remember where we put it */
735 mod->mod_start = run_addr;
736 mod->mod_end = run_addr + load_size;
740 printf("Placed module (%#8.8x+%#x)\n", run_addr, load_size);
748 * Code for shuffling sections into place and booting the new kernel
751 static void trampoline_start(section_t *secs, int sec_count,
752 size_t mbi_run_addr, size_t entry)
753 /* Final shuffle-and-boot code. Running on the stack; no external code
754 * or data can be relied on. */
/* NOTE(review): extraction artifact — every line below is fused with its
 * original line number, and several lines are missing (the opening brace,
 * the loop index declaration, the cli instruction, the lidt operand setup,
 * the overlap test between copy branches, the backward-copy pointer
 * adjustments, and the final boot asm statement).  Code tokens are left
 * byte-identical; only comments have been added. */
757 struct lidt_operand idt;
759 /* SYSLINUX has set up SS, DS and ES as 32-bit 0--4G data segments,
760 * but doesn't specify FS and GS. Multiboot wants them all to be
761 * the same, so we'd better do that before we overwrite the GDT. */
762 asm volatile("movl %ds, %ecx; movl %ecx, %fs; movl %ecx, %gs");
764 /* Turn off interrupts */
/* NOTE(review): the "cli" asm statement itself is missing from this
 * extract — only its comment survives. */
767 /* SYSLINUX has set up an IDT at 0x100000 that does all the
768 * comboot calls, and we're about to overwrite it. The Multiboot
769 * spec says that the kernel must set up its own IDT before turning
770 * on interrupts, but it's still entitled to use BIOS calls, so we'll
771 * put the IDT back to the BIOS one at the base of memory. */
/* NOTE(review): the lines that fill in idt.base/idt.limit (pointing at the
 * real-mode IVT at address 0) are missing here — the lidt below consumes
 * an idt value whose setup is not visible. */
774 asm volatile("lidt %0" : : "m" (idt));
776 /* Now, shuffle the sections */
777 for (i = 0; i < sec_count; i++) {
778 if (secs[i].src == NULL) {
/* A NULL src marks a BSS-style region: zero-fill it at its destination. */
779 /* asm bzero() code from com32/lib/memset.c */
780 char *q = (char *) secs[i].dest;
781 size_t nl = secs[i].size >> 2;
/* stosl writes size/4 dwords of zero, then stosb the remaining 0-3 bytes. */
782 asm volatile("cld ; rep ; stosl ; movl %3,%0 ; rep ; stosb"
783 : "+c" (nl), "+D" (q)
784 : "a" (0x0U), "r" (secs[i].size & 3));
/* NOTE(review): the "} else {" between the bzero and memmove branches is
 * among the missing lines. */
786 /* asm memmove() code from com32/lib/memmove.c */
787 const char *p = secs[i].src;
788 char *q = (char *) secs[i].dest;
789 size_t n = secs[i].size;
/* Forward byte copy (cld) — used for one overlap direction; the guarding
 * comparison of p and q is missing from this extract. */
791 asm volatile("cld ; rep ; movsb"
792 : "+c" (n), "+S" (p), "+D" (q));
/* Backward byte copy (std) for the other overlap direction; the lines
 * that move p and q to the ends of their buffers first are missing. */
796 asm volatile("std ; rep ; movsb"
797 : "+c" (n), "+S" (p), "+D" (q));
802 /* Now set up the last tiny bit of Multiboot environment... */
806 /* A20 is already enabled.
807 * CR0 already has PG cleared and PE set.
808 * EFLAGS already has VM and IF cleared.
809 * ESP is the kernels' problem.
810 * GDTR is the kernel's problem.
811 * CS is already a 32-bit, 0--4G code segments.
812 * DS, ES, FS and GS are already 32-bit, 0--4G data segments.
813 * EBX must point to the MBI: */
/* NOTE(review): the asm line loading mbi_run_addr into EBX is missing. */
817 /* EAX must be the Multiboot magic number. */
819 "movl $0x2badb002, %%eax;"
821 /* Start the kernel. */
/* NOTE(review): the indirect jump to `entry` is among the missing lines;
 * the constraint list below ties in mbi_run_addr and entry. */
825 : : "m" (mbi_run_addr), "r" (entry));
/* Empty marker function: its address minus trampoline_start's gives the
 * size of the trampoline code that boot() copies out of the data area. */
static void trampoline_end(void) {}
831 static void boot(size_t mbi_run_addr, size_t entry)
832 /* Tidy up SYSLINUX, shuffle memory and boot the kernel */
835 section_t *tr_sections;
836 void (*trampoline)(section_t *, int, size_t, size_t);
837 size_t trampoline_size;
839 /* Make sure the relocations are safe. */
842 /* Copy the shuffle-and-boot code and the array of relocations
843 * onto the memory we previously used for malloc() heap. This is
844 * safe because it's not the source or the destination of any
845 * copies, and there'll be no more library calls after the copy. */
847 tr_sections = ((section_t *) section_addr) + section_count;
848 trampoline = (void *) (tr_sections + section_count);
849 trampoline_size = (void *)&trampoline_end - (void *)&trampoline_start;
852 printf("tr_sections: %p\n"
854 "trampoline_size: %#8.8x\n"
855 "max_run_addr: %#8.8x\n",
856 tr_sections, trampoline, trampoline_size, max_run_addr);
859 printf("Booting: MBI=%#8.8x, entry=%#8.8x\n", mbi_run_addr, entry);
861 memmove(tr_sections, section_addr, section_count * sizeof (section_t));
862 memmove(trampoline, trampoline_start, trampoline_size);
864 /* Tell SYSLINUX to clean up */
865 regs.eax.l = 0x000c; /* "Perform final cleanup" */
866 regs.edx.l = 0; /* "Normal cleanup" */
867 __intcall(0x22, ®s, NULL);
869 /* Into the unknown */
870 trampoline(tr_sections, section_count, mbi_run_addr, entry);
874 int main(int argc, char **argv)
875 /* Parse the command-line and invoke loaders */
877 struct multiboot_info *mbi;
878 struct mod_list *modp;
880 int mbi_reloc_offset;
882 size_t mbi_run_addr, mbi_size, entry;
887 printf("%s. %s\n", version_string, copyright_string);
889 if (argc < 2 || !strcmp(argv[1], module_separator)) {
890 printf("Fatal: No kernel filename!\n");
897 "next_load_addr: %p\n"
901 &_end, argv[1], next_load_addr, section_addr, __mem_end, argv[0]);
904 /* How much space will the MBI need? */
906 mbi_size = sizeof(struct multiboot_info) + strlen(version_string) + 5;
907 for (i = 1 ; i < argc ; i++) {
908 if (!strcmp(argv[i], module_separator)) {
910 mbi_size += sizeof(struct mod_list) + 1;
912 mbi_size += strlen(argv[i]) + 1;
916 /* Allocate space in the load buffer for the MBI, all the command
917 * lines, and all the module details. */
918 mbi = (struct multiboot_info *)next_load_addr;
919 next_load_addr += mbi_size;
920 if (next_load_addr > section_addr) {
921 printf("Fatal: out of memory allocating for boot metadata.\n");
924 memset(mbi, 0, sizeof (struct multiboot_info));
925 p = (char *)(mbi + 1);
926 mbi->flags = MB_INFO_CMDLINE | MB_INFO_BOOT_LOADER_NAME;
928 /* Figure out the memory map.
929 * N.B. Must happen before place_section() is called */
932 mbi_run_addr = place_low_section(mbi_size, 4);
933 if (mbi_run_addr == 0) {
934 printf("Fatal: can't find space for the MBI!\n");
937 mbi_reloc_offset = (size_t)mbi - mbi_run_addr;
938 add_section(mbi_run_addr, (void *)mbi, mbi_size);
940 /* Module info structs */
941 modp = (struct mod_list *) (((size_t)p + 3) & ~3);
942 if (modules > 0) mbi->flags |= MB_INFO_MODS;
943 mbi->mods_count = modules;
944 mbi->mods_addr = ((size_t)modp) - mbi_reloc_offset;
945 p = (char *)(modp + modules);
947 /* Command lines: first kernel, then modules */
948 mbi->cmdline = ((size_t)p) - mbi_reloc_offset;
950 for (i = 1 ; i < argc ; i++) {
951 if (!strcmp(argv[i], module_separator)) {
953 modp[modules++].cmdline = ((size_t)p) - mbi_reloc_offset;
956 p += strlen(argv[i]);
963 strcpy(p, version_string);
964 mbi->boot_loader_name = ((size_t)p) - mbi_reloc_offset;
965 p += strlen(version_string) + 1;
967 /* Now, do all the loading, and boot it */
968 entry = load_kernel((char *)(mbi->cmdline + mbi_reloc_offset));
969 for (i=0; i<modules; i++) {
970 load_module(&(modp[i]), (char *)(modp[i].cmdline + mbi_reloc_offset));
972 boot(mbi_run_addr, entry);