/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>

#include <asm/uaccess.h>
#include <asm/param.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#define elf_addr_t unsigned long
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
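/*
 * Worked example with ELF_MIN_ALIGN == 4096 (0x1000): for _v == 0x12345,
 * ELF_PAGESTART(_v) == 0x12000, ELF_PAGEOFFSET(_v) == 0x345 and
 * ELF_PAGEALIGN(_v) == 0x13000.
 */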
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
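/*
 * Small negative error codes returned by do_mmap()/do_brk() become very
 * large unsigned values, so BAD_ADDR() catches both addresses beyond
 * TASK_SIZE and error returns in a single comparison.
 */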
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
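/*
 * Sketch of the stack image create_elf_tables() builds (downward-growing
 * stack, low to high addresses): argc, then the argv pointers plus a
 * terminating NULL, the envp pointers plus a NULL, and the auxv
 * (id, value) pairs ending with AT_NULL.  The argument and environment
 * strings themselves sit higher up; exec copied them there before this
 * code runs.
 */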
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
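	/*
	 * Each invocation stores one auxiliary-vector entry as an
	 * (id, value) pair of elf_addr_t and advances ei_index by two;
	 * e.g. NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE) emits the id
	 * followed by the page size.
	 */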
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * elf_info entries.
	 */
#ifdef ARCH_DLINFO
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);
	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}
	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;
	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map
static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return map_addr;
}
#endif /* !elf_map */
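/*
 * Example: a segment with p_vaddr == 0x08049123 (ELF_MIN_ALIGN == 4096)
 * is mapped starting at 0x08049000; the page offset 0x123 is added to
 * the length and subtracted from the file offset, preserving the
 * segment's in-page layout.
 */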
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;
	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}
			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}
	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec * interp_ex,
		struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if (current->flags & PF_RANDOMIZE)
		random_variable = get_random_int() % (8*1024*1024);
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top + random_variable);
#else
	return PAGE_ALIGN(stack_top - random_variable);
#endif
}
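/*
 * With PF_RANDOMIZE set this moves the stack top by up to 8 MiB, rounded
 * to a page boundary; without it the value is merely page-aligned.
 */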
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;
	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}
	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;
	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);
	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;
		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}
		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}
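		/*
		 * ELF_ET_DYN_BASE typically sits well above the default mmap
		 * base (it is defined as 2 * TASK_SIZE / 3 on several
		 * architectures).
		 */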
		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}
		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;
		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}
	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;
	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
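/* e.g. roundup(5, 4) == 8 and roundup(8, 4) == 8 */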
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
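/*
 * On-disk layout of a note: the fixed elf_note header, the name padded
 * to a 4-byte boundary, then the descriptor likewise padded.  A "CORE" /
 * NT_PRSTATUS note therefore occupies sizeof(struct elf_note) + 8 +
 * roundup(sizeof(struct elf_prstatus), 4) bytes.
 */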
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
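/*
 * The state-to-letter mapping above indexes "RSDTZW": i == 0 yields 'R'
 * (running), while index 4 yields 'Z', which is what pr_zomb tests for.
 */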
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	fill_elf_header(elf, segs+1);	/* including notes section */
	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;
	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);
	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);
	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}
	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
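	/*
	 * Aligning dataoff to ELF_EXEC_PAGESIZE means each dumped segment
	 * starts on a page boundary in the core file, so debuggers can
	 * mmap() the segment contents directly.
	 */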
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr, page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}
#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif
	if ((off_t)file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t)file->f_pos, offset);
	}
end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");