/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * File-scope declarations for the zImage boot wrapper.
 *
 * NOTE(review): this extract is incomplete — the embedded original line
 * numbers jump (23 -> 26, 29 -> 32, ...), so declarations between those
 * points (e.g. _end, other addr_range fields) are missing from this view.
 */
/* Implemented in the wrapper's asm support code; flushes I/D caches over
 * the given range before jumping into the decompressed kernel. */
23 extern void flush_cache(void *, unsigned long);
26 /* Value picked to match that used by yaboot */
27 #define PROG_START 0x01400000 /* only used on 64-bit systems */
28 #define RAM_END (512<<20) /* Fixme: use OF */
29 #define ONE_MB 0x100000
/* Symbols provided by the wrapper's linker script: BSS start, and the
 * embedded (possibly gzip'd) vmlinux and initrd images. */
32 extern char __bss_start[];
34 extern char _vmlinux_start[];
35 extern char _vmlinux_end[];
36 extern char _initrd_start[];
37 extern char _initrd_end[];
39 /* A buffer that may be edited by tools operating on a zImage binary so as to
40 * edit the command line passed to vmlinux (by setting /chosen/bootargs).
41 * The buffer is put in it's own section so that tools may locate it easier.
43 static char builtin_cmdline[512]
44 __attribute__((section("__builtin_cmdline")));
/* Total system memory; presumably filled in from the OF device tree by
 * code outside this extract — TODO confirm. */
50 unsigned long memsize;
/* Address ranges for the decompressed kernel, the compressed image, and
 * the initrd. struct addr_range is declared elsewhere (not visible here). */
52 static struct addr_range vmlinux;
53 static struct addr_range vmlinuz;
54 static struct addr_range initrd;
/* Byte offset of the first PT_LOAD segment within the vmlinux ELF file;
 * set by is_elf64()/is_elf32(), consumed by start(). */
56 static unsigned long elfoffset;
58 static char scratch[46912]; /* scratch space for gunzip, from zlib_inflate_workspacesize() */
59 static char elfheader[256];
/* Signature of the kernel entry point; the full parameter list is cut off
 * in this extract (only the first parameter line survives). */
62 typedef void (*kernel_entry_t)( unsigned long,
/* Next candidate address to hand to the firmware claim() call; advanced
 * by try_claim(), floored at PROG_START by is_elf64(). */
70 static unsigned long claim_base;
/*
 * gunzip() - decompress gzip data from src into dst using zlib_inflate.
 *
 * The gzip header is parsed by hand (skipping EXTRA_FIELD, ORIG_NAME,
 * COMMENT and HEAD_CRC fields as flagged), which is why inflateInit2 is
 * called with -MAX_WBITS: negative window bits select raw deflate with
 * no zlib/gzip header processing.
 *
 * dst/dstlen: output buffer and its capacity.
 * src:        gzip'd input.
 * lenp:       in: available input bytes; out: bytes written to dst.
 *
 * NOTE(review): this extract is missing many original lines (the embedded
 * numbering jumps 78 -> 86, 92 -> 95, ...): the opening brace, local
 * declarations, the gzip magic/flags checks, several header-skip loops,
 * exit() calls on the error paths, and the closing brace are all absent.
 */
78 static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
/* Byte 2 must be the deflate method and no reserved flag bits set. */
86 if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
87 printf("bad gzipped data\n\r");
/* Skip the optional extra field: 2-byte little-endian length at 10..11. */
90 if ((flags & EXTRA_FIELD) != 0)
91 i = 12 + src[10] + (src[11] << 8);
/* Skip NUL-terminated original-name and comment strings, then the
 * 2-byte header CRC, when present (skip loops lost in this extract). */
92 if ((flags & ORIG_NAME) != 0)
95 if ((flags & COMMENT) != 0)
98 if ((flags & HEAD_CRC) != 0)
101 printf("gunzip: ran out of data in header\n\r");
/* The static scratch buffer must be big enough for zlib's workspace. */
105 if (zlib_inflate_workspacesize() > sizeof(scratch)) {
106 printf("gunzip needs more mem\n");
109 memset(&s, 0, sizeof(s));
110 s.workspace = scratch;
/* -MAX_WBITS: raw inflate, header already consumed above. */
111 r = zlib_inflateInit2(&s, -MAX_WBITS);
113 printf("inflateInit2 returned %d\n\r", r);
/* i is the computed header size; feed only the payload to inflate. */
117 s.avail_in = *lenp - i;
119 s.avail_out = dstlen;
120 r = zlib_inflate(&s, Z_FULL_FLUSH);
121 if (r != Z_OK && r != Z_STREAM_END) {
122 printf("inflate returned %d msg: %s\n\r", r, s.msg);
/* Report the number of decompressed bytes back through lenp. */
125 *lenp = s.next_out - (unsigned char *) dst;
/*
 * try_claim() - ask Open Firmware for `size` bytes of physical memory.
 *
 * Walks claim_base upward in 1 MB steps, calling the OF client-interface
 * claim() at each candidate until one succeeds; claim() returns (void *)-1
 * on failure. On success claim_base is advanced past the claimed region
 * so subsequent claims don't overlap. Returns the claimed address
 * (return statement not visible in this extract; 0 is treated as failure
 * by the callers in start()).
 *
 * NOTE(review): incomplete extract — the opening brace, the debug-print
 * guard around line 135, the loop break, and the final return are missing.
 */
129 static unsigned long try_claim(unsigned long size)
131 unsigned long addr = 0;
/* RAM_END is a hard-coded 512 MB ceiling (see the Fixme at its #define). */
133 for(; claim_base < RAM_END; claim_base += ONE_MB) {
135 printf(" trying: 0x%08lx\n\r", claim_base);
137 addr = (unsigned long)claim(claim_base, size, 0);
138 if ((void *)addr != (void *)-1)
/* Bump the next candidate past this allocation, page-aligned. */
143 claim_base = PAGE_ALIGN(claim_base + size);
/*
 * is_elf64() - check whether hdr is a big-endian ELF64 PPC64 executable.
 *
 * On a match, locates the first PT_LOAD program header and records:
 *   elfoffset       = file offset of that segment,
 *   vmlinux.size    = p_filesz + elfoffset,
 *   vmlinux.memsize = p_memsz  + elfoffset,
 * (the offset is included so the claim covers the ELF header that gunzip
 * will also expand — see the comment before the claim in start()).
 * Also floors claim_base at PROG_START to keep older firmware happy.
 * Returns nonzero on success, 0 otherwise (returns not visible here).
 *
 * NOTE(review): incomplete extract — opening brace, local declarations
 * (i, elf64ph), the early `return 0`, loop break, and `return 1` are
 * missing (embedded numbering jumps 149 -> 153, 160 -> 163, ...).
 */
147 static int is_elf64(void *hdr)
149 Elf64_Ehdr *elf64 = hdr;
/* Magic bytes, 64-bit class, big-endian data, EXEC type, PPC64 machine. */
153 if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 &&
154 elf64->e_ident[EI_MAG1] == ELFMAG1 &&
155 elf64->e_ident[EI_MAG2] == ELFMAG2 &&
156 elf64->e_ident[EI_MAG3] == ELFMAG3 &&
157 elf64->e_ident[EI_CLASS] == ELFCLASS64 &&
158 elf64->e_ident[EI_DATA] == ELFDATA2MSB &&
159 elf64->e_type == ET_EXEC &&
160 elf64->e_machine == EM_PPC64))
/* Scan the program-header table for the first loadable segment. */
163 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
164 (unsigned long)elf64->e_phoff);
165 for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
166 if (elf64ph->p_type == PT_LOAD)
/* i == e_phnum means no PT_LOAD segment was found. */
168 if (i >= (unsigned int)elf64->e_phnum)
171 elfoffset = (unsigned long)elf64ph->p_offset;
172 vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
173 vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;
175 #if defined(PROG_START)
177 * Maintain a "magic" minimum address. This keeps some older
178 * firmware platforms running.
181 if (claim_base < PROG_START)
182 claim_base = PROG_START;
/*
 * is_elf32() - check whether hdr is a big-endian ELF32 PPC executable.
 *
 * 32-bit counterpart of is_elf64(): on a match, finds the first PT_LOAD
 * program header and records elfoffset plus vmlinux.size/.memsize as
 * p_filesz/p_memsz extended by the segment's file offset. Unlike the
 * 64-bit path it does not touch claim_base (PROG_START is 64-bit only).
 * Returns nonzero on success, 0 otherwise (returns not visible here).
 *
 * NOTE(review): incomplete extract — opening brace, declarations of
 * i/elf32ph, early `return 0`, loop break, `return 1` and closing brace
 * are missing from this view.
 */
188 static int is_elf32(void *hdr)
190 Elf32_Ehdr *elf32 = hdr;
/* Magic bytes, 32-bit class, big-endian data, EXEC type, PPC machine. */
194 if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 &&
195 elf32->e_ident[EI_MAG1] == ELFMAG1 &&
196 elf32->e_ident[EI_MAG2] == ELFMAG2 &&
197 elf32->e_ident[EI_MAG3] == ELFMAG3 &&
198 elf32->e_ident[EI_CLASS] == ELFCLASS32 &&
199 elf32->e_ident[EI_DATA] == ELFDATA2MSB &&
200 elf32->e_type == ET_EXEC &&
201 elf32->e_machine == EM_PPC))
/* Re-point elf32 at the static elfheader copy before using e_phoff
 * (hdr and elfheader are the same buffer in practice — see start()). */
204 elf32 = (Elf32_Ehdr *)elfheader;
205 elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff);
206 for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
207 if (elf32ph->p_type == PT_LOAD)
/* No loadable segment found. */
209 if (i >= elf32->e_phnum)
212 elfoffset = elf32ph->p_offset;
213 vmlinux.size = elf32ph->p_filesz + elf32ph->p_offset;
214 vmlinux.memsize = elf32ph->p_memsz + elf32ph->p_offset;
/*
 * export_cmdline() - push the built-in command line into the device tree.
 *
 * If builtin_cmdline was patched into the zImage (non-empty) and the
 * firmware's /chosen "bootargs" property is currently empty, write
 * builtin_cmdline (including its NUL terminator) as the new bootargs.
 * A non-empty existing bootargs wins over the built-in one.
 *
 * NOTE(review): incomplete extract — opening brace, `len` declaration,
 * the two early returns, and the closing brace are not visible here.
 */
218 void export_cmdline(void* chosen_handle)
/* Two bytes are enough: we only care whether bootargs is empty. */
221 char cmdline[2] = { 0, 0 };
/* Nothing baked in: leave firmware bootargs untouched. */
223 if (builtin_cmdline[0] == 0)
226 len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline));
227 if (len > 0 && cmdline[0] != 0)
230 setprop(chosen_handle, "bootargs", builtin_cmdline,
231 strlen(builtin_cmdline) + 1);
/*
 * start() - C entry point of the zImage wrapper, called from crt0 with
 * the register arguments (a1/a2), the OF client-interface entry point
 * (promptr) and the current stack pointer (sp, used only for the banner).
 *
 * Sequence visible in this extract:
 *   1. Zero the wrapper's own BSS and set up the prom call interface.
 *   2. Find /chosen and its stdout so printf works.
 *   3. Place claim_base above the wrapper image (1 MB aligned).
 *   4. Decompress (or copy) the kernel's ELF header and validate it as
 *      PPC32 or PPC64; this also sets elfoffset and vmlinux.memsize.
 *   5. claim() memory for the kernel, then for the initrd, and memmove
 *      the initrd out of the wrapper image.
 *   6. Decompress (or memmove) the kernel into place.
 *   7. Export the built-in command line, skip the in-memory ELF header,
 *      flush caches over the kernel image, and jump to it.
 *
 * NOTE(review): incomplete extract — opening brace, the `len` and
 * elf32/elf64 locals, every exit() on the error paths, several closing
 * braces, and the final `}` are missing (embedded numbering has gaps).
 */
235 void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
238 kernel_entry_t kernel_entry;
240 memset(__bss_start, 0, _end - __bss_start);
242 prom = (int (*)(void *)) promptr;
243 chosen_handle = finddevice("/chosen");
244 if (chosen_handle == (void *) -1)
/* stdout property is a 4-byte phandle; anything else is a failure. */
246 if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
249 printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
252 * The first available claim_base must be above the end of the
253 * the loaded kernel wrapper file (_start to _end includes the
254 * initrd image if it is present) and rounded up to a nice
255 * 1 MB boundary for good measure.
258 claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
260 vmlinuz.addr = (unsigned long)_vmlinux_start;
261 vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
263 /* gunzip the ELF header of the kernel */
/* 0x1f8b are the gzip magic bytes; reading them as one u16 like this
 * relies on big-endian byte order, which holds on PPC. */
264 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
266 gunzip(elfheader, sizeof(elfheader),
267 (unsigned char *)vmlinuz.addr, &len);
/* Not compressed: the ELF header is directly at the image start. */
269 memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));
271 if (!is_elf64(elfheader) && !is_elf32(elfheader)) {
272 printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
276 /* We need to claim the memsize plus the file offset since gzip
277 * will expand the header (file offset), then the kernel, then
278 * possible rubbish we don't care about. But the kernel bss must
279 * be claimed (it will be zero'd by the kernel itself)
281 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
282 vmlinux.addr = try_claim(vmlinux.memsize);
283 if (vmlinux.addr == 0) {
284 printf("Can't allocate memory for kernel image !\n\r");
289 * Now we try to claim memory for the initrd (and copy it there)
291 initrd.size = (unsigned long)(_initrd_end - _initrd_start);
292 initrd.memsize = initrd.size;
293 if ( initrd.size > 0 ) {
294 printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size);
295 initrd.addr = try_claim(initrd.size);
296 if (initrd.addr == 0) {
297 printf("Can't allocate memory for initial ramdisk !\n\r");
/* memmove (not memcpy): source inside the wrapper image may overlap
 * the freshly claimed destination range. */
302 printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
303 initrd.addr, (unsigned long)_initrd_start, initrd.size);
304 memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
305 printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
308 /* Eventually gunzip the kernel */
309 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
310 printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
311 vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
313 gunzip((void *)vmlinux.addr, vmlinux.memsize,
314 (unsigned char *)vmlinuz.addr, &len);
315 printf("done 0x%lx bytes\n\r", len);
317 memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
320 export_cmdline(chosen_handle);
322 /* Skip over the ELF header */
324 printf("... skipping 0x%lx bytes of ELF header\n\r",
/* The first PT_LOAD segment starts elfoffset bytes into the image. */
327 vmlinux.addr += elfoffset;
/* Make the just-written kernel visible to instruction fetch. */
329 flush_cache((void *)vmlinux.addr, vmlinux.size);
331 kernel_entry = (kernel_entry_t)vmlinux.addr;
333 printf( "kernel:\n\r"
334 " entry addr = 0x%lx\n\r"
338 " bi_recs = 0x%lx,\n\r",
339 (unsigned long)kernel_entry, a1, a2,
340 (unsigned long)prom, NULL);
/* Hand off: a1/a2 pass through from firmware, prom is the OF entry. */
343 kernel_entry(a1, a2, prom, NULL);
/* The kernel never returns; reaching here is an error. */
345 printf("Error: Linux kernel returned to zImage bootloader!\n\r");