* linux/fs/binfmt_elf.c
* These are the functions used to load ELF format executables as used
* on SVr4 machines. Information on the format may be found in the book
* "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
* Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
#define elf_addr_t unsigned long
* If we don't support core dumping, then supply a NULL so we
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#define elf_core_dump NULL
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
# define ELF_MIN_ALIGN PAGE_SIZE
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
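/*
 * Illustrative note (not in the original source): with ELF_MIN_ALIGN of
 * 0x1000, a value such as 0x0804a123 decomposes as
 *   ELF_PAGESTART(0x0804a123)  == 0x0804a000
 *   ELF_PAGEOFFSET(0x0804a123) == 0x123
 *   ELF_PAGEALIGN(0x0804a123)  == 0x0804b000
 * i.e. round down to the page, the in-page offset, and round up to the
 * next page respectively.
 */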
static struct linux_binfmt elf_format = {
.module = THIS_MODULE,
.load_binary = load_elf_binary,
.load_shlib = load_elf_library,
.core_dump = elf_core_dump,
.min_coredump = ELF_EXEC_PAGESIZE
#define BAD_ADDR(x) ((unsigned long)(x) >= PAGE_MASK)
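/*
 * Illustrative note (not in the original source): do_mmap() and friends
 * return either a mapped address or a negative errno cast to unsigned
 * long; such error values land in the topmost page of the address space,
 * which is what the comparison against PAGE_MASK above is meant to catch.
 */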
static int set_brk(unsigned long start, unsigned long end)
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
down_write(&current->mm->mmap_sem);
addr = do_brk(start, end - start);
up_write(&current->mm->mmap_sem);
current->mm->start_brk = current->mm->brk = end;
/* We need to explicitly zero any fractional pages
after the data section (i.e. bss). This would
contain the junk from the file that should not
static int padzero(unsigned long elf_bss)
nbyte = ELF_PAGEOFFSET(elf_bss);
nbyte = ELF_MIN_ALIGN - nbyte;
if (clear_user((void __user *) elf_bss, nbyte))
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
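/*
 * Illustrative note (not in the original source): on the usual
 * downward-growing stack, STACK_ALLOC() carves out 'len' bytes by moving
 * sp down and returning the new (lower) address, STACK_ADD() reserves
 * room for 'items' word-sized slots, and STACK_ROUND() keeps the final
 * stack pointer 16-byte aligned as most ABIs require.  The
 * CONFIG_STACK_GROWSUP variants do the same in the opposite direction.
 */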
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
int interp_aout, unsigned long load_addr,
unsigned long interp_load_addr)
unsigned long p = bprm->p;
int argc = bprm->argc;
int envc = bprm->envc;
elf_addr_t __user *argv;
elf_addr_t __user *envp;
elf_addr_t __user *sp;
elf_addr_t __user *u_platform;
const char *k_platform = ELF_PLATFORM;
elf_addr_t *elf_info;
struct task_struct *tsk = current;
* If this architecture has a platform capability string, copy it
* to userspace. In some cases (Sparc), this info is impossible
* for userspace to get any other way, in others (i386) it is
size_t len = strlen(k_platform) + 1;
* In some cases (e.g. Hyper-Threading), we want to avoid L1
* evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them.
p = arch_align_stack(p);
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_platform, k_platform, len))
/* Create the ELF interpreter info */
elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
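/*
 * Illustrative note (not in the original source): the auxiliary vector is
 * a flat array of (id, value) pairs terminated by an AT_NULL entry, e.g.
 *   { AT_PHDR, load_addr + e_phoff }, { AT_PHNUM, e_phnum }, ...,
 *   { AT_NULL, 0 }
 * It is built here in current->mm->saved_auxv and later copied onto the
 * user stack immediately after the NULL that terminates the envp array.
 */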
* ARCH_DLINFO must come first so PPC can do its special alignment of
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
/* AT_NULL is zero; clear the rest too */
memset(&elf_info[ei_index], 0,
sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
/* And advance past the AT_NULL entry. */
sp = STACK_ADD(p, ei_index);
items = (argc + 1) + (envc + 1);
items += 3; /* a.out interpreters require argv & envp too */
items += 1; /* ELF interpreters only put argc on the stack */
bprm->p = STACK_ROUND(sp, items);
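/*
 * Illustrative note (not in the original source): after the rounding above
 * the new process sees, from the stack pointer upwards:
 *   argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL | auxv pairs
 * with the argument/environment strings and the platform string that were
 * copied out earlier sitting at higher addresses.
 */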
/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
sp = (elf_addr_t __user *)bprm->p;
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
if (__put_user(argc, sp++))
envp = argv + argc + 1;
__put_user((elf_addr_t)(unsigned long)argv, sp++);
__put_user((elf_addr_t)(unsigned long)envp, sp++);
envp = argv + argc + 1;
/* Populate argv and envp */
p = current->mm->arg_end = current->mm->arg_start;
__put_user((elf_addr_t)p, argv++);
len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
if (__put_user(0, argv))
current->mm->arg_end = current->mm->env_start = p;
__put_user((elf_addr_t)p, envp++);
len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
if (__put_user(0, envp))
current->mm->env_end = p;
/* Put the elf_info on the stack in the right place. */
sp = (elf_addr_t __user *)envp + 1;
if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
static unsigned long elf_map(struct file *filep, unsigned long addr,
struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
unsigned long map_addr;
unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
addr = ELF_PAGESTART(addr);
size = ELF_PAGEALIGN(size);
/* mmap() will return -EINVAL if given a zero size, but a
* segment with zero filesize is perfectly valid */
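/*
 * Illustrative note (not in the original source): a PT_LOAD segment need
 * not start on a page boundary.  If p_vaddr is 0x0804a100 and p_offset is
 * 0x100, the computations above widen the mapping to start at file offset
 * 0 and address 0x0804a000, so that the mmap() arguments are page aligned
 * while the segment bytes still land at their intended addresses.
 */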
down_write(&current->mm->mmap_sem);
* total_size is the size of the ELF (interpreter) image.
* The _first_ mmap needs to know the full size, otherwise
* randomization might put this image into an overlapping
* position with the ELF binary image. (since size < total_size)
* So we first map the 'big' image - and unmap the remainder at
* the end. (which unmap is needed for ELF images with holes.)
total_size = ELF_PAGEALIGN(total_size);
map_addr = do_mmap(filep, addr, total_size, prot, type, off);
if (!BAD_ADDR(map_addr))
do_munmap(current->mm, map_addr+size, total_size-size);
map_addr = do_mmap(filep, addr, size, prot, type, off);
up_write(&current->mm->mmap_sem);
#endif /* !elf_map */
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
int i, first_idx = -1, last_idx = -1;
for (i = 0; i < nr; i++)
if (cmds[i].p_type == PT_LOAD) {
return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
ELF_PAGESTART(cmds[first_idx].p_vaddr);
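/*
 * Illustrative note (not in the original source): this returns the span
 * from the page containing the first PT_LOAD segment's p_vaddr up to the
 * end (p_vaddr + p_memsz) of the last one, i.e. the amount of address
 * space the whole image needs when it is mapped in one piece.
 */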
/* This is much more generalized than the library routine read function,
so we keep this separate. Technically the library read function
is only provided so that we can read a.out libraries that have
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
struct file * interpreter,
unsigned long *interp_map_addr,
unsigned long no_base)
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long load_addr = 0;
int load_addr_set = 0;
unsigned long last_bss = 0, elf_bss = 0;
unsigned long error = ~0UL;
unsigned long total_size;
/* First of all, some simple consistency checks */
if (interp_elf_ex->e_type != ET_EXEC &&
interp_elf_ex->e_type != ET_DYN)
if (!elf_check_arch(interp_elf_ex))
if (!interpreter->f_op || !interpreter->f_op->mmap)
* If the size of this structure has changed, then punt, since
* we will be doing the wrong thing.
if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
if (interp_elf_ex->e_phnum < 1 ||
interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
/* Now read in all of the header information */
size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
if (size > ELF_MIN_ALIGN)
elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
if (retval != size) {
total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
unsigned long vaddr = 0;
unsigned long k, map_addr;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
vaddr = eppnt->p_vaddr;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
elf_type |= MAP_FIXED;
else if (no_base && interp_elf_ex->e_type == ET_DYN)
map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
if (!*interp_map_addr)
*interp_map_addr = map_addr;
if (BAD_ADDR(map_addr))
if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
load_addr = map_addr - ELF_PAGESTART(vaddr);
* Check to see if the section's size will overflow the
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
k = load_addr + eppnt->p_vaddr;
if (BAD_ADDR(k) || eppnt->p_filesz > eppnt->p_memsz ||
eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
* Find the end of the file mapping for this phdr, and keep
* track of the largest address we see for this.
k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
* Now fill out the bss section. First pad the last page up
* to the page boundary, and then perform a mmap to make sure
* that there are zero-mapped pages up to and including the
if (padzero(elf_bss)) {
elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
down_write(&current->mm->mmap_sem);
error = do_brk(elf_bss, last_bss - elf_bss);
up_write(&current->mm->mmap_sem);
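/*
 * Illustrative note (not in the original source): the interpreter's bss is
 * produced in two steps - padzero() clears the tail of the last page that
 * was mapped from the file, and the do_brk() above supplies anonymous
 * zero pages for whatever lies between that page and last_bss.
 */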
static unsigned long load_aout_interp(struct exec * interp_ex,
struct file * interpreter)
unsigned long text_data, elf_entry = ~0UL;
current->mm->end_code = interp_ex->a_text;
text_data = interp_ex->a_text + interp_ex->a_data;
current->mm->end_data = text_data;
current->mm->brk = interp_ex->a_bss + text_data;
switch (N_MAGIC(*interp_ex)) {
addr = (char __user *)0;
offset = N_TXTOFF(*interp_ex);
addr = (char __user *) N_TXTADDR(*interp_ex);
down_write(&current->mm->mmap_sem);
do_brk(0, text_data);
up_write(&current->mm->mmap_sem);
if (!interpreter->f_op || !interpreter->f_op->read)
if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
flush_icache_range((unsigned long)addr,
(unsigned long)addr + text_data);
down_write(&current->mm->mmap_sem);
do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
up_write(&current->mm->mmap_sem);
elf_entry = interp_ex->a_entry;
* These are the functions used to load ELF style executables and shared
* libraries. There is no binary dependent code anywhere else.
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
static unsigned long randomize_stack_top(unsigned long stack_top)
unsigned int random_variable = 0;
if (current->flags & PF_RANDOMIZE) {
random_variable = get_random_int() & STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN(stack_top) + random_variable;
return PAGE_ALIGN(stack_top) - random_variable;
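/*
 * Illustrative note (not in the original source): with the default
 * STACK_RND_MASK of 0x7ff and 4K pages this shifts the stack top by a
 * page-aligned random amount of up to 0x7ff << 12 bytes, i.e. just under
 * 8MB, and only when the task has PF_RANDOMIZE set.
 */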
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
struct file *interpreter = NULL; /* to shut gcc up */
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter = 0;
struct elf_phdr * elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc = 0;
char passed_fileno[6];
struct files_struct *files;
int have_pt_gnu_stack, executable_stack;
unsigned long def_flags = 0;
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
struct exec interp_ex;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
/* Get the exec-header */
loc->elf_ex = *((struct elfhdr *) bprm->buf);
/* First of all, some simple consistency checks */
if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
if (!elf_check_arch(&loc->elf_ex))
if (!bprm->file->f_op||!bprm->file->f_op->mmap)
/* Now read in all of the header information */
if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
if (loc->elf_ex.e_phnum < 1 ||
loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
if (retval != size) {
files = current->files; /* Refcounted so ok */
retval = unshare_files();
if (files == current->files) {
put_files_struct(files);
/* exec will make our files private anyway, but for the a.out
loader stuff we need to do it earlier */
retval = get_unused_fd();
get_file(bprm->file);
fd_install(elf_exec_fileno = retval, bprm->file);
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
if (elf_ppnt->p_filesz > PATH_MAX ||
elf_ppnt->p_filesz < 2)
elf_interpreter = kmalloc(elf_ppnt->p_filesz,
if (!elf_interpreter)
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
if (retval != elf_ppnt->p_filesz) {
goto out_free_interp;
/* make sure path is NULL terminated */
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
/* If the program interpreter is one of these two,
* then assume an iBCS2 image. Otherwise assume
* a native linux image.
if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
ibcs2_interpreter = 1;
* The early SET_PERSONALITY here is so that the lookup
* for the interpreter happens in the namespace of the
* to-be-execed image. SET_PERSONALITY can select an
* However, SET_PERSONALITY is NOT allowed to switch
* this task into the new image's memory mapping
* policy - that is, TASK_SIZE must still evaluate to
* that which is appropriate to the execing application.
* This is because exit_mmap() needs to have TASK_SIZE
* evaluate to the size of the old image.
* So if (say) a 64-bit application is execing a 32-bit
* application it is the architecture's responsibility
* to defer changing the value of TASK_SIZE until the
* switch really is going to happen - do this in
* flush_thread(). - akpm
SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
goto out_free_dentry;
/* Get the exec headers */
loc->interp_ex = *((struct exec *) bprm->buf);
loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
elf_ppnt = elf_phdata;
executable_stack = EXSTACK_DEFAULT;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
if (elf_ppnt->p_type == PT_GNU_STACK) {
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
executable_stack = EXSTACK_DISABLE_X;
have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
if (current->personality == PER_LINUX && (exec_shield & 2)) {
executable_stack = EXSTACK_DISABLE_X;
current->flags |= PF_RANDOMIZE;
/* Some simple consistency checks for the interpreter */
if (elf_interpreter) {
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
(N_MAGIC(loc->interp_ex) != ZMAGIC) &&
(N_MAGIC(loc->interp_ex) != QMAGIC))
interpreter_type = INTERPRETER_ELF;
if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
interpreter_type &= ~INTERPRETER_ELF;
if (!interpreter_type)
goto out_free_dentry;
/* Make sure only one type was selected */
if ((interpreter_type & INTERPRETER_ELF) &&
interpreter_type != INTERPRETER_ELF) {
// FIXME - ratelimit this before re-enabling
// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
interpreter_type = INTERPRETER_ELF;
/* Verify the interpreter has a valid arch */
if ((interpreter_type == INTERPRETER_ELF) &&
!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
/* Executables without an interpreter also need a personality */
SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
char *passed_p = passed_fileno;
sprintf(passed_fileno, "%d", elf_exec_fileno);
if (elf_interpreter) {
retval = copy_strings_kernel(1, &passed_p, bprm);
goto out_free_dentry;
/* Flush all traces of the currently running executable */
retval = flush_old_exec(bprm);
goto out_free_dentry;
* Turn off the CS limit completely if exec-shield disabled or
if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
arch_add_exec_range(current->mm, -1);
/* Discard our unneeded old files struct */
put_files_struct(files);
/* OK, This is the point of no return */
current->mm->start_data = 0;
current->mm->end_data = 0;
current->mm->end_code = 0;
current->mm->mmap = NULL;
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
if (!(exec_shield & 2) &&
elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
arch_pick_mmap_layout(current->mm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
current->mm->free_area_cache = current->mm->mmap_base;
current->mm->cached_hole_size = 0;
retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
current->mm->start_stack = bprm->p;
/* Now we do a little grungy work by mmapping the ELF image into
the correct location in memory.
for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
if (elf_ppnt->p_type != PT_LOAD)
if (unlikely (elf_brk > elf_bss)) {
/* There was a PT_LOAD segment with p_memsz > p_filesz
before this one. Map anonymous pages, if needed,
and clear the area. */
retval = set_brk (elf_bss + load_bias,
elf_brk + load_bias);
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
nbyte = ELF_PAGEOFFSET(elf_bss);
nbyte = ELF_MIN_ALIGN - nbyte;
if (nbyte > elf_brk - elf_bss)
nbyte = elf_brk - elf_bss;
if (clear_user((void __user *)elf_bss +
* This bss-zeroing can fail if the ELF
* file specifies odd protections. So
* we don't check the return value
if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
elf_flags |= MAP_FIXED;
else if (loc->elf_ex.e_type == ET_DYN)
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
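/* Illustrative note (not in the original source): ET_DYN objects are
 * position independent, so the first segment is placed near
 * ELF_ET_DYN_BASE and load_bias records how far the image was shifted
 * from the addresses recorded in its program headers; ET_EXEC binaries
 * keep load_bias at 0 and are mapped MAP_FIXED at their linked
 * addresses. */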
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
if (!load_addr_set) {
load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
if (loc->elf_ex.e_type == ET_DYN) {
ELF_PAGESTART(load_bias + vaddr);
load_addr += load_bias;
reloc_func_desc = load_bias;
k = elf_ppnt->p_vaddr;
if (k < start_code) start_code = k;
if (start_data < k) start_data = k;
* Check to see if the section's size will overflow the
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
loc->elf_ex.e_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
/* Calling set_brk effectively mmaps the pages that we need
* for the bss and break sections. We must do this before
* mapping in the interpreter, to make sure it doesn't wind
* up getting placed where the bss needs to go.
retval = set_brk(elf_bss, elf_brk);
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
send_sig(SIGSEGV, current, 0);
retval = -EFAULT; /* Nobody gets to see this, but.. */
goto out_free_dentry;
if (elf_interpreter) {
if (interpreter_type == INTERPRETER_AOUT)
elf_entry = load_aout_interp(&loc->interp_ex,
elf_entry = load_elf_interp(&loc->interp_elf_ex,
if (!BAD_ADDR(elf_entry)) {
/* load_elf_interp() returns relocation adjustment */
interp_load_addr = elf_entry;
elf_entry += loc->interp_elf_ex.e_entry;
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = IS_ERR((void *)elf_entry) ?
(int)elf_entry : -EINVAL;
goto out_free_dentry;
reloc_func_desc = interp_load_addr;
allow_write_access(interpreter);
kfree(elf_interpreter);
elf_entry = loc->elf_ex.e_entry;
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
goto out_free_dentry;
if (interpreter_type != INTERPRETER_AOUT)
sys_close(elf_exec_fileno);
set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
retval = arch_setup_additional_pages(bprm, executable_stack,
start_code, interp_map_addr);
send_sig(SIGKILL, current, 0);
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
load_addr, interp_load_addr);
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->arg_start += strlen(passed_fileno) + 1;
current->mm->end_code = end_code;
current->mm->start_code = start_code;
current->mm->start_data = start_data;
current->mm->end_data = end_data;
current->mm->start_stack = bprm->p;
#ifdef __HAVE_ARCH_RANDOMIZE_BRK
if (current->flags & PF_RANDOMIZE)
randomize_brk(elf_brk);
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
up_write(&current->mm->mmap_sem);
#ifdef ELF_PLAT_INIT
* The ABI may specify that certain registers be set up in special
* ways (on i386 %edx is the address of a DT_FINI function, for
* example). In addition, it may also specify (eg, PowerPC64 ELF)
* that the e_entry field is the address of the function descriptor
* for the startup routine, rather than the address of the startup
* routine itself. This macro performs whatever initialization to
* the regs structure is required as well as any relocations to the
* function descriptor entries when executing dynamically linked apps.
ELF_PLAT_INIT(regs, reloc_func_desc);
start_thread(regs, elf_entry, bprm->p);
if (unlikely(current->ptrace & PT_PTRACED)) {
if (current->ptrace & PT_TRACE_EXEC)
ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
send_sig(SIGTRAP, current, 0);
allow_write_access(interpreter);
kfree(elf_interpreter);
sys_close(elf_exec_fileno);
put_files_struct(current->files);
current->files = files;
/* This is really simpleminded and specialized - we are loading an
a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
if (retval != sizeof(elf_ex))
if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
/* First of all, some simple consistency checks */
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
!elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
/* Now read in all of the header information */
j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
elf_phdata = kmalloc(j, GFP_KERNEL);
retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
if ((eppnt + i)->p_type == PT_LOAD)
while (eppnt->p_type != PT_LOAD)
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
error = do_mmap(file,
ELF_PAGESTART(eppnt->p_vaddr),
ELF_PAGEOFFSET(eppnt->p_vaddr)),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
ELF_PAGEOFFSET(eppnt->p_vaddr)));
up_write(&current->mm->mmap_sem);
if (error != ELF_PAGESTART(eppnt->p_vaddr))
elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
if (padzero(elf_bss)) {
len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
bss = eppnt->p_memsz + eppnt->p_vaddr;
down_write(&current->mm->mmap_sem);
do_brk(len, bss - len);
up_write(&current->mm->mmap_sem);
* Note that some platforms still use traditional core dumps and not
* the ELF core dump. Each platform can select it as appropriate.
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
* Modelled on fs/exec.c:aout_core_dump()
* Jeremy Fitzhardinge <jeremy@sw.oz.au>
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
static int dump_write(struct file *file, const void *addr, int nr)
return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
static int dump_seek(struct file *file, loff_t off)
if (file->f_op->llseek) {
if (file->f_op->llseek(file, off, 0) != off)
* Decide whether a segment is worth dumping; default is yes to be
* sure (missing info is worse than too much; etc).
* Personally I'd include everything, and use the coredump limit...
* I think we should skip something. But I am not sure how. H.J.
static int maydump(struct vm_area_struct *vma)
/* Do not dump I/O mapped devices or special mappings */
if (vma->vm_flags & (VM_IO | VM_RESERVED))
if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
/* Dump shared memory only if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED)
return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
/* If it hasn't been written to, don't write it out */
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
/* An ELF note in memory */
unsigned int datasz;
static int notesize(struct memelfnote *en)
sz = sizeof(struct elf_note);
sz += roundup(strlen(en->name) + 1, 4);
sz += roundup(en->datasz, 4);
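/*
 * Illustrative note (not in the original source): on disk each note is a
 * struct elf_note header (n_namesz, n_descsz, n_type) followed by the
 * name and then the descriptor data, with both the name and the data
 * padded to a 4-byte boundary - which is exactly what notesize() adds up
 * and writenote() emits below.
 */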
#define DUMP_WRITE(addr, nr) \
do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off) \
do { if (!dump_seek(file, (off))) return 0; } while(0)
static int writenote(struct memelfnote *men, struct file *file)
en.n_namesz = strlen(men->name) + 1;
en.n_descsz = men->datasz;
en.n_type = men->type;
DUMP_WRITE(&en, sizeof(en));
DUMP_WRITE(men->name, en.n_namesz);
/* XXX - cast from long long to long to avoid need for libgcc.a */
DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
DUMP_WRITE(men->data, men->datasz);
DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
#define DUMP_WRITE(addr, nr) \
if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
#define DUMP_SEEK(off) \
if (!dump_seek(file, (off))) \
static void fill_elf_header(struct elfhdr *elf, int segs)
memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_ident[EI_DATA] = ELF_DATA;
elf->e_ident[EI_VERSION] = EV_CURRENT;
elf->e_ident[EI_OSABI] = ELF_OSABI;
memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
elf->e_type = ET_CORE;
elf->e_machine = ELF_ARCH;
elf->e_version = EV_CURRENT;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_flags = ELF_CORE_EFLAGS;
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = segs;
elf->e_shentsize = 0;
elf->e_shstrndx = 0;
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
phdr->p_filesz = sz;
static void fill_note(struct memelfnote *note, const char *name, int type,
unsigned int sz, void *data)
* fill up all the fields in prstatus from the given task struct, except registers
* which need to be filled up separately.
static void fill_prstatus(struct elf_prstatus *prstatus,
struct task_struct *p, long signr)
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
prstatus->pr_sigpend = p->pending.signal.sig[0];
prstatus->pr_sighold = p->blocked.sig[0];
prstatus->pr_pid = p->pid;
prstatus->pr_ppid = p->parent->pid;
prstatus->pr_pgrp = process_group(p);
prstatus->pr_sid = p->signal->session;
if (thread_group_leader(p)) {
* This is the record for the group leader. Add in the
* cumulative times of previous dead threads. This total
* won't include the time of each live thread whose state
* is included in the core dump. The final total reported
* to our parent process when it calls wait4 will include
* those sums as well as the little bit more time it takes
* this and each other thread to finish dying after the
* core dump synchronization phase.
cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
&prstatus->pr_utime);
cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
&prstatus->pr_stime);
cputime_to_timeval(p->utime, &prstatus->pr_utime);
cputime_to_timeval(p->stime, &prstatus->pr_stime);
cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
struct mm_struct *mm)
unsigned int i, len;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
len = mm->arg_end - mm->arg_start;
if (len >= ELF_PRARGSZ)
len = ELF_PRARGSZ-1;
if (copy_from_user(&psinfo->pr_psargs,
(const char __user *)mm->arg_start, len))
for(i = 0; i < len; i++)
if (psinfo->pr_psargs[i] == 0)
psinfo->pr_psargs[i] = ' ';
psinfo->pr_psargs[len] = 0;
psinfo->pr_pid = p->pid;
psinfo->pr_ppid = p->parent->pid;
psinfo->pr_pgrp = process_group(p);
psinfo->pr_sid = p->signal->session;
i = p->state ? ffz(~p->state) + 1 : 0;
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
psinfo->pr_nice = task_nice(p);
psinfo->pr_flag = p->flags;
SET_UID(psinfo->pr_uid, p->uid);
SET_GID(psinfo->pr_gid, p->gid);
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
struct list_head list;
struct elf_prstatus prstatus; /* NT_PRSTATUS */
elf_fpregset_t fpu; /* NT_PRFPREG */
struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
elf_fpxregset_t xfpu; /* NT_PRXFPREG */
struct memelfnote notes[3];
* In order to add the specific thread information for the elf file format,
* we need to keep a linked list of every thread's pr_status and then
* create a single section for them in the final core file.
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
struct task_struct *p = t->thread;
fill_prstatus(&t->prstatus, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
sz += notesize(&t->notes[0]);
if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
sz += notesize(&t->notes[1]);
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
sz += notesize(&t->notes[2]);
* This is a two-pass process; first we find the offsets of the bits,
* and then they are actually written out. If we run out of core limit
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
struct vm_area_struct *vma;
struct elfhdr *elf = NULL;
off_t offset = 0, dataoff;
unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
struct memelfnote *notes = NULL;
struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
struct task_struct *g, *p;
LIST_HEAD(thread_list);
struct list_head *t;
elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
elf_fpxregset_t *xfpu = NULL;
int thread_status_size = 0;
* We no longer stop all VM operations.
* This is because those processes that could possibly change map_count or
* the mmap / vma pages are now blocked in do_exit on current finishing
* Only ptrace can touch these memory addresses, but it doesn't change
* the map_count or the pages allocated. So no possibility of crashing
* exists while dumping the mm->vm_next areas to the core file.
/* alloc memory for large data structures: too large to be on stack */
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
#ifdef ELF_CORE_COPY_XFPREGS
xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
struct elf_thread_status *tmp;
read_lock(&tasklist_lock);
if (current->mm == p->mm && current != p) {
tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
read_unlock(&tasklist_lock);
INIT_LIST_HEAD(&tmp->list);
list_add(&tmp->list, &thread_list);
while_each_thread(g,p);
read_unlock(&tasklist_lock);
list_for_each(t, &thread_list) {
struct elf_thread_status *tmp;
tmp = list_entry(t, struct elf_thread_status, list);
sz = elf_dump_thread_status(signr, tmp);
thread_status_size += sz;
/* now collect the dump for the current */
memset(prstatus, 0, sizeof(*prstatus));
fill_prstatus(prstatus, current, signr);
elf_core_copy_regs(&prstatus->pr_reg, regs);
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
segs += ELF_CORE_EXTRA_PHDRS;
fill_elf_header(elf, segs+1); /* including notes section */
current->flags |= PF_DUMPCORE;
* Set up the notes in similar form to SVR4 core dumps made
* with info from their /proc.
fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
fill_psinfo(psinfo, current->group_leader, current->mm);
fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
auxv = (elf_addr_t *) current->mm->saved_auxv;
while (auxv[i - 2] != AT_NULL);
fill_note(&notes[numnote++], "CORE", NT_AUXV,
i * sizeof (elf_addr_t), auxv);
/* Try to dump the FPU. */
if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
fill_note(notes + numnote++,
"CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(current, xfpu))
fill_note(notes + numnote++,
"LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
DUMP_WRITE(elf, sizeof(*elf));
offset += sizeof(*elf); /* Elf header */
offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
/* Write notes phdr entry */
struct elf_phdr phdr;
for (i = 0; i < numnote; i++)
sz += notesize(notes + i);
sz += thread_status_size;
fill_elf_note_phdr(&phdr, sz, offset);
DUMP_WRITE(&phdr, sizeof(phdr));
/* Page-align dumped data */
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
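/*
 * Illustrative note (not in the original source): the resulting core file
 * is laid out as the ELF header, then one program header per mapping plus
 * one for the notes, then the notes themselves, and finally the dumped
 * segment contents starting at dataoff, which is rounded up to
 * ELF_EXEC_PAGESIZE so each segment's file offset is page aligned.
 */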
/* Write program headers for segments dump */
for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
struct elf_phdr phdr;
sz = vma->vm_end - vma->vm_start;
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_filesz = maydump(vma) ? sz : 0;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
DUMP_WRITE(&phdr, sizeof(phdr));
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
ELF_CORE_WRITE_EXTRA_PHDRS;
/* write out the notes section */
for (i = 0; i < numnote; i++)
if (!writenote(notes + i, file))
/* write out the thread status notes section */
list_for_each(t, &thread_list) {
struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
for (i = 0; i < tmp->num_notes; i++)
if (!writenote(&tmp->notes[i], file))
for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
for (addr = vma->vm_start;
addr += PAGE_SIZE) {
struct vm_area_struct *vma;
if (get_user_pages(current, current->mm, addr, 1, 0, 1,
&page, &vma) <= 0) {
DUMP_SEEK (file->f_pos + PAGE_SIZE);
if (page == ZERO_PAGE(addr)) {
DUMP_SEEK (file->f_pos + PAGE_SIZE);
flush_cache_page(vma, addr, page_to_pfn(page));
if ((size += PAGE_SIZE) > limit ||
!dump_write(file, kaddr,
page_cache_release(page);
page_cache_release(page);
#ifdef ELF_CORE_WRITE_EXTRA_DATA
ELF_CORE_WRITE_EXTRA_DATA;
if ((off_t)file->f_pos != offset) {
printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
(off_t)file->f_pos, offset);
while (!list_empty(&thread_list)) {
struct list_head *tmp = thread_list.next;
kfree(list_entry(tmp, struct elf_thread_status, list));
#ifdef ELF_CORE_COPY_XFPREGS
#endif /* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
return register_binfmt(&elf_format);
static void __exit exit_elf_binfmt(void)
/* Remove the COFF and ELF loaders. */
unregister_binfmt(&elf_format);
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");