2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/random.h>
41 #include <linux/vs_memory.h>
42 #include <linux/vs_cvirt.h>
44 #include <asm/uaccess.h>
45 #include <asm/param.h>
48 #include <linux/elf.h>
50 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
51 static int load_elf_library(struct file*);
52 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
53 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Legacy fallback: before the typed auxv work, auxv entries were plain longs. */
56 #define elf_addr_t unsigned long

/*
 * NOTE(review): the #else/#endif lines of the conditionals below (orig
 * lines 65/67, 71/73, 77) are missing from this corrupted listing; the
 * code lines themselves are reproduced verbatim.
 */
60 * If we don't support core dumping, then supply a NULL so we
63 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
64 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
66 #define elf_core_dump NULL

/* ELF_MIN_ALIGN: the granularity the loader aligns segments to — the
 * larger of the ELF ABI page size and the kernel page size. */
69 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
70 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
72 # define ELF_MIN_ALIGN PAGE_SIZE

/* Arch may override the e_flags written into core dump headers. */
75 #ifndef ELF_CORE_EFLAGS
76 #define ELF_CORE_EFLAGS 0

/* Round an address down to / extract the offset within / round up to
 * an ELF_MIN_ALIGN boundary. */
79 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
80 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
81 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/*
 * Registration record handed to the binfmt core: wires the ELF loader
 * entry points (exec, shared-library load, core dump) together.
 * NOTE(review): the closing "};" of this initializer (orig lines 89-90)
 * is missing from this corrupted listing.
 */
83 static struct linux_binfmt elf_format = {
84 .module = THIS_MODULE,
85 .load_binary = load_elf_binary,
86 .load_shlib = load_elf_library,
87 .core_dump = elf_core_dump,
88 .min_coredump = ELF_EXEC_PAGESIZE

/* An address is "bad" if it lands in the error-code range near the top
 * of the address space (covers -errno returns from do_mmap/do_brk). */
91 #define BAD_ADDR(x) ((unsigned long)(x) > PAGE_MASK)
93 static int set_brk(unsigned long start, unsigned long end)
95 start = ELF_PAGEALIGN(start);
96 end = ELF_PAGEALIGN(end);
99 down_write(¤t->mm->mmap_sem);
100 addr = do_brk(start, end - start);
101 up_write(¤t->mm->mmap_sem);
105 current->mm->start_brk = current->mm->brk = end;
110 /* We need to explicitly zero any fractional pages
111 after the data section (i.e. bss). This would
112 contain the junk from the file that should not
116 static int padzero(unsigned long elf_bss)
120 nbyte = ELF_PAGEOFFSET(elf_bss);
122 nbyte = ELF_MIN_ALIGN - nbyte;
123 if (clear_user((void __user *) elf_bss, nbyte))
129 /* Let's use some macros to make this stack manipulation a little clearer */
/*
 * STACK_ADD   - advance a stack pointer by `items` elf_addr_t slots.
 * STACK_ROUND - round the stack pointer to a 16-byte boundary.
 * STACK_ALLOC - carve `len` bytes off the stack, yielding the start of
 *               the carved region.
 * Two variants: upward-growing stacks (PA-RISC) and the usual
 * downward-growing case.
 * NOTE(review): the "#else" separating the variants (orig line 135) and
 * the closing "#endif" are missing from this corrupted listing.
 */
130 #ifdef CONFIG_STACK_GROWSUP
131 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
132 #define STACK_ROUND(sp, items) \
133 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
134 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
136 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
137 #define STACK_ROUND(sp, items) \
138 (((unsigned long) (sp - items)) &~ 15UL)
139 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
/*
 * create_elf_tables - lay out the initial user stack for a freshly
 * exec'd ELF image: the platform string, the auxiliary vector (auxv),
 * argc, the argv/envp pointer arrays, and fix up the mm's arg/env
 * bookkeeping pointers.
 *
 * NOTE(review): this listing is corrupted — the return-type line,
 * several declarations (ei_index, items, len, loops' conditions) and
 * assorted braces are missing.  Code lines are reproduced verbatim;
 * only comments have been added.
 */
143 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
144 int interp_aout, unsigned long load_addr,
145 unsigned long interp_load_addr)
147 unsigned long p = bprm->p;
148 int argc = bprm->argc;
149 int envc = bprm->envc;
150 elf_addr_t __user *argv;
151 elf_addr_t __user *envp;
152 elf_addr_t __user *sp;
153 elf_addr_t __user *u_platform;
154 const char *k_platform = ELF_PLATFORM;
156 elf_addr_t *elf_info;
158 struct task_struct *tsk = current;
161 * If this architecture has a platform capability string, copy it
162 * to userspace. In some cases (Sparc), this info is impossible
163 * for userspace to get any other way, in others (i386) it is
169 size_t len = strlen(k_platform) + 1;
172 * In some cases (e.g. Hyper-Threading), we want to avoid L1
173 * evictions by the processes running on the same package. One
174 * thing we can do is to shuffle the initial stack for them.
177 p = arch_align_stack(p);
/* Platform string is pushed onto the stack itself; auxv will point at it. */
179 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
180 if (__copy_to_user(u_platform, k_platform, len))
184 /* Create the ELF interpreter info */
/* auxv is staged in saved_auxv (kept for /proc/pid/auxv) and copied to
 * the user stack at the end of this function. */
185 elf_info = (elf_addr_t *) current->mm->saved_auxv;
186 #define NEW_AUX_ENT(id, val) \
187 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
191 * ARCH_DLINFO must come first so PPC can do its special alignment of
196 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
197 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
198 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
199 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
200 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
201 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
202 NEW_AUX_ENT(AT_BASE, interp_load_addr);
203 NEW_AUX_ENT(AT_FLAGS, 0);
204 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
205 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
206 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
207 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
208 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
209 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
211 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
213 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
214 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
217 /* AT_NULL is zero; clear the rest too */
218 memset(&elf_info[ei_index], 0,
219 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
221 /* And advance past the AT_NULL entry. */
/* Reserve room below the strings for auxv + argc/argv/envp pointers,
 * 16-byte aligned. */
224 sp = STACK_ADD(p, ei_index);
226 items = (argc + 1) + (envc + 1);
228 items += 3; /* a.out interpreters require argv & envp too */
230 items += 1; /* ELF interpreters only put argc on the stack */
232 bprm->p = STACK_ROUND(sp, items);
234 /* Point sp at the lowest address on the stack */
235 #ifdef CONFIG_STACK_GROWSUP
236 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
237 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
239 sp = (elf_addr_t __user *)bprm->p;
242 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
243 if (__put_user(argc, sp++))
/* a.out interpreter variant: argv/envp array addresses are pushed too. */
247 envp = argv + argc + 1;
248 __put_user((elf_addr_t)(unsigned long)argv, sp++);
249 __put_user((elf_addr_t)(unsigned long)envp, sp++);
252 envp = argv + argc + 1;
255 /* Populate argv and envp */
/* Strings were already copied above bprm->p; write a pointer per
 * string and walk past its NUL. */
256 p = current->mm->arg_end = current->mm->arg_start;
259 __put_user((elf_addr_t)p, argv++);
260 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
261 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
265 if (__put_user(0, argv))
267 current->mm->arg_end = current->mm->env_start = p;
270 __put_user((elf_addr_t)p, envp++);
271 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
272 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
276 if (__put_user(0, envp))
278 current->mm->env_end = p;
280 /* Put the elf_info on the stack in the right place. */
281 sp = (elf_addr_t __user *)envp + 1;
282 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
289 static unsigned long elf_map(struct file *filep, unsigned long addr,
290 struct elf_phdr *eppnt, int prot, int type,
291 unsigned long total_size)
293 unsigned long map_addr;
294 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
295 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
297 addr = ELF_PAGESTART(addr);
298 size = ELF_PAGEALIGN(size);
300 /* mmap() will return -EINVAL if given a zero size, but a
301 * segment with zero filesize is perfectly valid */
305 down_write(¤t->mm->mmap_sem);
308 * total_size is the size of the ELF (interpreter) image.
309 * The _first_ mmap needs to know the full size, otherwise
310 * randomization might put this image into an overlapping
311 * position with the ELF binary image. (since size < total_size)
312 * So we first map the 'big' image - and unmap the remainder at
313 * the end. (which unmap is needed for ELF images with holes.)
316 total_size = ELF_PAGEALIGN(total_size);
317 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
318 if (!BAD_ADDR(map_addr))
319 do_munmap(current->mm, map_addr+size, total_size-size);
321 map_addr = do_mmap(filep, addr, size, prot, type, off);
323 up_write(¤t->mm->mmap_sem);
328 #endif /* !elf_map */
330 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
332 int i, first_idx = -1, last_idx = -1;
334 for (i = 0; i < nr; i++)
335 if (cmds[i].p_type == PT_LOAD) {
344 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
345 ELF_PAGESTART(cmds[first_idx].p_vaddr);
349 /* This is much more generalized than the library routine read function,
350 so we keep this separate. Technically the library read function
351 is only provided so that we can read a.out libraries that have
/*
 * load_elf_interp - map the ELF program interpreter (dynamic linker)
 * into the current mm and return its load bias (or an error address).
 * *interp_map_addr receives the address of its first mapping.
 *
 * NOTE(review): this listing is corrupted — gotos, error-path labels,
 * kfree/return lines and several braces are missing, and "&current"
 * appears mangled as a currency sign near the end.  Code lines are
 * reproduced verbatim; only comments have been added.
 */
354 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
355 struct file * interpreter,
356 unsigned long *interp_map_addr,
357 unsigned long no_base)
359 struct elf_phdr *elf_phdata;
360 struct elf_phdr *eppnt;
361 unsigned long load_addr = 0;
362 int load_addr_set = 0;
363 unsigned long last_bss = 0, elf_bss = 0;
364 unsigned long error = ~0UL;
365 unsigned long total_size;
368 /* First of all, some simple consistency checks */
369 if (interp_elf_ex->e_type != ET_EXEC &&
370 interp_elf_ex->e_type != ET_DYN)
372 if (!elf_check_arch(interp_elf_ex))
374 if (!interpreter->f_op || !interpreter->f_op->mmap)
378 * If the size of this structure has changed, then punt, since
379 * we will be doing the wrong thing.
381 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the kmalloc below cannot be driven oversize. */
383 if (interp_elf_ex->e_phnum < 1 ||
384 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
387 /* Now read in all of the header information */
389 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
390 if (size > ELF_MIN_ALIGN)
392 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
396 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
398 if (retval != size) {
404 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
/* Map each PT_LOAD segment; first mapping of an ET_DYN interpreter
 * fixes the load bias for the rest. */
409 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
410 if (eppnt->p_type == PT_LOAD) {
411 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
413 unsigned long vaddr = 0;
414 unsigned long k, map_addr;
416 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
417 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
418 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
419 vaddr = eppnt->p_vaddr;
420 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
421 elf_type |= MAP_FIXED;
422 else if (no_base && interp_elf_ex->e_type == ET_DYN)
425 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
427 if (!*interp_map_addr)
428 *interp_map_addr = map_addr;
430 if (BAD_ADDR(map_addr))
433 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
434 load_addr = map_addr - ELF_PAGESTART(vaddr);
439 * Check to see if the section's size will overflow the
440 * allowed task size. Note that p_filesz must always be
441 * <= p_memsize so it is only necessary to check p_memsz.
443 k = load_addr + eppnt->p_vaddr;
444 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
445 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
451 * Find the end of the file mapping for this phdr, and keep
452 * track of the largest address we see for this.
454 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
459 * Do the same thing for the memory mapping - between
460 * elf_bss and last_bss is the bss section.
462 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
469 * Now fill out the bss section. First pad the last page up
470 * to the page boundary, and then perform a mmap to make sure
471 * that there are zero-mapped pages up to and including the
474 if (padzero(elf_bss)) {
479 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
481 /* Map the last of the bss segment */
482 if (last_bss > elf_bss) {
/* NOTE(review): "&current" mojibake on the next two lock lines — the
 * intended calls are down_write/up_write(&current->mm->mmap_sem). */
483 down_write(¤t->mm->mmap_sem);
484 error = do_brk(elf_bss, last_bss - elf_bss);
485 up_write(¤t->mm->mmap_sem);
/*
 * load_aout_interp - load an a.out-format program interpreter: set up
 * code/data/brk bounds in the mm, read the text+data image in, and
 * allocate the interpreter's bss.  Returns its entry point or ~0UL on
 * failure.
 *
 * NOTE(review): corrupted listing — case labels, default branch, gotos
 * and the final return are missing, and "&current" is mangled into a
 * currency sign on the lock lines (intended:
 * down_write/up_write(&current->mm->mmap_sem)).  Code reproduced
 * verbatim; only comments added.
 */
498 static unsigned long load_aout_interp(struct exec * interp_ex,
499 struct file * interpreter)
501 unsigned long text_data, elf_entry = ~0UL;
505 current->mm->end_code = interp_ex->a_text;
506 text_data = interp_ex->a_text + interp_ex->a_data;
507 current->mm->end_data = text_data;
508 current->mm->brk = interp_ex->a_bss + text_data;
/* OMAGIC images load at 0; ZMAGIC/QMAGIC carry their own text offset
 * and address. */
510 switch (N_MAGIC(*interp_ex)) {
513 addr = (char __user *)0;
517 offset = N_TXTOFF(*interp_ex);
518 addr = (char __user *) N_TXTADDR(*interp_ex);
524 down_write(¤t->mm->mmap_sem);
525 do_brk(0, text_data);
526 up_write(¤t->mm->mmap_sem);
527 if (!interpreter->f_op || !interpreter->f_op->read)
529 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
531 flush_icache_range((unsigned long)addr,
532 (unsigned long)addr + text_data);
/* Allocate the interpreter's bss above the loaded text+data. */
535 down_write(¤t->mm->mmap_sem);
536 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
538 up_write(¤t->mm->mmap_sem);
539 elf_entry = interp_ex->a_entry;
546 * These are the functions used to load ELF style executables and shared
547 * libraries. There is no binary dependent code anywhere else.
/* Which kind of program interpreter the binary requested (bitmask while
 * probing, single value once decided). */
550 #define INTERPRETER_NONE 0
551 #define INTERPRETER_AOUT 1
552 #define INTERPRETER_ELF 2
/* Arch may override the stack-randomization entropy mask.
 * NOTE(review): the matching #endif (orig line 556) is missing from
 * this corrupted listing. */
554 #ifndef STACK_RND_MASK
555 #define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
558 static unsigned long randomize_stack_top(unsigned long stack_top)
560 unsigned int random_variable = 0;
562 if (current->flags & PF_RANDOMIZE) {
563 random_variable = get_random_int() & STACK_RND_MASK;
564 random_variable <<= PAGE_SHIFT;
566 #ifdef CONFIG_STACK_GROWSUP
567 return PAGE_ALIGN(stack_top) + random_variable;
569 return PAGE_ALIGN(stack_top) - random_variable;
/*
 * load_elf_binary - the ELF exec handler: validate the headers, find
 * and load the program interpreter (ELF or a.out), flush the old
 * image, map every PT_LOAD segment, set up brk/bss, build the stack
 * tables and start the new thread.
 *
 * NOTE(review): this listing is corrupted — the opening brace, many
 * declarations (loc, i, size, retval, elf_exec_fileno, error, ...),
 * gotos, labels, closing braces and the entire out_* error-path tail
 * after line 1111 are missing, and "&current" appears mangled as a
 * currency sign in the MMAP_PAGE_ZERO section.  Code lines are
 * reproduced verbatim; only comments have been added.
 */
573 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
575 struct file *interpreter = NULL; /* to shut gcc up */
576 unsigned long load_addr = 0, load_bias = 0;
577 int load_addr_set = 0;
578 char * elf_interpreter = NULL;
579 unsigned int interpreter_type = INTERPRETER_NONE;
580 unsigned char ibcs2_interpreter = 0;
582 struct elf_phdr * elf_ppnt, *elf_phdata;
583 unsigned long elf_bss, elf_brk;
587 unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
588 unsigned long start_code, end_code, start_data, end_data;
589 unsigned long reloc_func_desc = 0;
590 char passed_fileno[6];
591 struct files_struct *files;
592 int have_pt_gnu_stack, executable_stack;
593 unsigned long def_flags = 0;
/* Headers are kept in a kmalloc'd struct (loc) to keep this frame small. */
595 struct elfhdr elf_ex;
596 struct elfhdr interp_elf_ex;
597 struct exec interp_ex;
600 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
606 /* Get the exec-header */
607 loc->elf_ex = *((struct elfhdr *) bprm->buf);
610 /* First of all, some simple consistency checks */
611 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
614 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
616 if (!elf_check_arch(&loc->elf_ex))
618 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
621 /* Now read in all of the header information */
623 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the program-header kmalloc cannot be driven oversize. */
625 if (loc->elf_ex.e_phnum < 1 ||
626 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
628 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
630 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
634 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
635 if (retval != size) {
/* Unshare the files struct before exec proper (a.out loader needs it). */
641 files = current->files; /* Refcounted so ok */
642 retval = unshare_files();
645 if (files == current->files) {
646 put_files_struct(files);
650 /* exec will make our files private anyway, but for the a.out
651 loader stuff we need to do it earlier */
653 retval = get_unused_fd();
656 get_file(bprm->file);
657 fd_install(elf_exec_fileno = retval, bprm->file);
659 elf_ppnt = elf_phdata;
/* Pass 1 over the program headers: find PT_INTERP and read the
 * interpreter's own exec header. */
668 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
669 if (elf_ppnt->p_type == PT_INTERP) {
670 /* This is the program interpreter used for
671 * shared libraries - for now assume that this
672 * is an a.out format binary
676 if (elf_ppnt->p_filesz > PATH_MAX ||
677 elf_ppnt->p_filesz < 2)
681 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
683 if (!elf_interpreter)
686 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
689 if (retval != elf_ppnt->p_filesz) {
692 goto out_free_interp;
694 /* make sure path is NULL terminated */
696 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
697 goto out_free_interp;
699 /* If the program interpreter is one of these two,
700 * then assume an iBCS2 image. Otherwise assume
701 * a native linux image.
703 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
704 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
705 ibcs2_interpreter = 1;
708 * The early SET_PERSONALITY here is so that the lookup
709 * for the interpreter happens in the namespace of the
710 * to-be-execed image. SET_PERSONALITY can select an
713 * However, SET_PERSONALITY is NOT allowed to switch
714 * this task into the new images's memory mapping
715 * policy - that is, TASK_SIZE must still evaluate to
716 * that which is appropriate to the execing application.
717 * This is because exit_mmap() needs to have TASK_SIZE
718 * evaluate to the size of the old image.
720 * So if (say) a 64-bit application is execing a 32-bit
721 * application it is the architecture's responsibility
722 * to defer changing the value of TASK_SIZE until the
723 * switch really is going to happen - do this in
724 * flush_thread(). - akpm
726 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
728 interpreter = open_exec(elf_interpreter);
729 retval = PTR_ERR(interpreter);
730 if (IS_ERR(interpreter))
731 goto out_free_interp;
732 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
733 if (retval != BINPRM_BUF_SIZE) {
736 goto out_free_dentry;
739 /* Get the exec headers */
740 loc->interp_ex = *((struct exec *) bprm->buf);
741 loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
/* Pass 2: PT_GNU_STACK decides whether the stack is executable. */
747 elf_ppnt = elf_phdata;
748 executable_stack = EXSTACK_DEFAULT;
750 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
751 if (elf_ppnt->p_type == PT_GNU_STACK) {
752 if (elf_ppnt->p_flags & PF_X)
753 executable_stack = EXSTACK_ENABLE_X;
755 executable_stack = EXSTACK_DISABLE_X;
758 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
/* exec-shield "forced" mode overrides the binary's stack request. */
760 if (current->personality == PER_LINUX && (exec_shield & 2)) {
761 executable_stack = EXSTACK_DISABLE_X;
762 current->flags |= PF_RANDOMIZE;
765 /* Some simple consistency checks for the interpreter */
766 if (elf_interpreter) {
767 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
769 /* Now figure out which format our binary is */
770 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
771 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
772 (N_MAGIC(loc->interp_ex) != QMAGIC))
773 interpreter_type = INTERPRETER_ELF;
775 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
776 interpreter_type &= ~INTERPRETER_ELF;
779 if (!interpreter_type)
780 goto out_free_dentry;
782 /* Make sure only one type was selected */
783 if ((interpreter_type & INTERPRETER_ELF) &&
784 interpreter_type != INTERPRETER_ELF) {
785 // FIXME - ratelimit this before re-enabling
786 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
787 interpreter_type = INTERPRETER_ELF;
789 /* Verify the interpreter has a valid arch */
790 if ((interpreter_type == INTERPRETER_ELF) &&
791 !elf_check_arch(&loc->interp_elf_ex))
792 goto out_free_dentry;
794 /* Executables without an interpreter also need a personality */
795 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
798 /* OK, we are done with that, now set up the arg stuff,
799 and then start this sucker up */
801 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
802 char *passed_p = passed_fileno;
803 sprintf(passed_fileno, "%d", elf_exec_fileno);
805 if (elf_interpreter) {
806 retval = copy_strings_kernel(1, &passed_p, bprm);
808 goto out_free_dentry;
813 /* Flush all traces of the currently running executable */
/* Point of near-no-return: after flush_old_exec the old image is gone. */
814 retval = flush_old_exec(bprm);
816 goto out_free_dentry;
820 * Turn off the CS limit completely if exec-shield disabled or
823 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
824 arch_add_exec_range(current->mm, -1);
827 /* Discard our unneeded old files struct */
830 put_files_struct(files);
834 /* OK, This is the point of no return */
835 current->mm->start_data = 0;
836 current->mm->end_data = 0;
837 current->mm->end_code = 0;
838 current->mm->mmap = NULL;
839 current->flags &= ~PF_FORKNOEXEC;
840 current->mm->def_flags = def_flags;
842 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
843 may depend on the personality. */
844 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
845 if (!(exec_shield & 2) &&
846 elf_read_implies_exec(loc->elf_ex, executable_stack))
847 current->personality |= READ_IMPLIES_EXEC;
849 if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
850 current->flags |= PF_RANDOMIZE;
851 arch_pick_mmap_layout(current->mm);
853 /* Do this so that we can load the interpreter, if need be. We will
854 change some of these later */
855 current->mm->free_area_cache = current->mm->mmap_base;
856 current->mm->cached_hole_size = 0;
857 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
860 send_sig(SIGKILL, current, 0);
861 goto out_free_dentry;
864 current->mm->start_stack = bprm->p;
867 /* Now we do a little grungy work by mmaping the ELF image into
868 the correct location in memory.
871 for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
872 int elf_prot = 0, elf_flags;
873 unsigned long k, vaddr;
875 if (elf_ppnt->p_type != PT_LOAD)
878 if (unlikely (elf_brk > elf_bss)) {
881 /* There was a PT_LOAD segment with p_memsz > p_filesz
882 before this one. Map anonymous pages, if needed,
883 and clear the area. */
884 retval = set_brk (elf_bss + load_bias,
885 elf_brk + load_bias);
887 send_sig(SIGKILL, current, 0);
888 goto out_free_dentry;
890 nbyte = ELF_PAGEOFFSET(elf_bss);
892 nbyte = ELF_MIN_ALIGN - nbyte;
893 if (nbyte > elf_brk - elf_bss)
894 nbyte = elf_brk - elf_bss;
895 if (clear_user((void __user *)elf_bss +
898 * This bss-zeroing can fail if the ELF
899 * file specifies odd protections. So
900 * we don't check the return value
906 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
907 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
908 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
910 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
912 vaddr = elf_ppnt->p_vaddr;
913 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
914 elf_flags |= MAP_FIXED;
915 else if (loc->elf_ex.e_type == ET_DYN)
919 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
922 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
923 if (BAD_ADDR(error)) {
924 send_sig(SIGKILL, current, 0);
925 goto out_free_dentry;
/* First successful mapping pins load_addr / the ET_DYN load bias. */
928 if (!load_addr_set) {
930 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
931 if (loc->elf_ex.e_type == ET_DYN) {
933 ELF_PAGESTART(load_bias + vaddr);
934 load_addr += load_bias;
935 reloc_func_desc = load_bias;
938 k = elf_ppnt->p_vaddr;
939 if (k < start_code) start_code = k;
940 if (start_data < k) start_data = k;
943 * Check to see if the section's size will overflow the
944 * allowed task size. Note that p_filesz must always be
945 * <= p_memsz so it is only necessary to check p_memsz.
947 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
948 elf_ppnt->p_memsz > TASK_SIZE ||
949 TASK_SIZE - elf_ppnt->p_memsz < k) {
950 /* set_brk can never work. Avoid overflows. */
951 send_sig(SIGKILL, current, 0);
952 goto out_free_dentry;
955 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
959 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
963 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* Shift all recorded bounds by the final load bias. */
968 loc->elf_ex.e_entry += load_bias;
969 elf_bss += load_bias;
970 elf_brk += load_bias;
971 start_code += load_bias;
972 end_code += load_bias;
973 start_data += load_bias;
974 end_data += load_bias;
976 /* Calling set_brk effectively mmaps the pages that we need
977 * for the bss and break sections. We must do this before
978 * mapping in the interpreter, to make sure it doesn't wind
979 * up getting placed where the bss needs to go.
981 retval = set_brk(elf_bss, elf_brk);
983 send_sig(SIGKILL, current, 0);
984 goto out_free_dentry;
986 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
987 send_sig(SIGSEGV, current, 0);
988 retval = -EFAULT; /* Nobody gets to see this, but.. */
989 goto out_free_dentry;
992 if (elf_interpreter) {
993 if (interpreter_type == INTERPRETER_AOUT)
994 elf_entry = load_aout_interp(&loc->interp_ex,
997 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1001 if (!BAD_ADDR(elf_entry)) {
1002 /* load_elf_interp() returns relocation adjustment */
1003 interp_load_addr = elf_entry;
1004 elf_entry += loc->interp_elf_ex.e_entry;
1007 if (BAD_ADDR(elf_entry)) {
1008 printk(KERN_ERR "Unable to load interpreter %.128s\n",
1010 force_sig(SIGSEGV, current);
1011 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
1012 goto out_free_dentry;
1014 reloc_func_desc = interp_load_addr;
1016 allow_write_access(interpreter);
1018 kfree(elf_interpreter);
1020 elf_entry = loc->elf_ex.e_entry;
1021 if (BAD_ADDR(elf_entry)) {
1022 send_sig(SIGSEGV, current, 0);
1023 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
1024 goto out_free_dentry;
1028 if (interpreter_type != INTERPRETER_AOUT)
1029 sys_close(elf_exec_fileno);
1031 set_binfmt(&elf_format);
1033 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1034 retval = arch_setup_additional_pages(bprm, executable_stack,
1035 start_code, interp_map_addr);
1037 send_sig(SIGKILL, current, 0);
1040 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1044 compute_creds(bprm);
1045 current->flags &= ~PF_FORKNOEXEC;
1046 create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
1047 load_addr, interp_load_addr);
1048 /* N.B. passed_fileno might not be initialized? */
1049 if (interpreter_type == INTERPRETER_AOUT)
1050 current->mm->arg_start += strlen(passed_fileno) + 1;
1051 current->mm->end_code = end_code;
1052 current->mm->start_code = start_code;
1053 current->mm->start_data = start_data;
1054 current->mm->end_data = end_data;
1055 current->mm->start_stack = bprm->p;
1057 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
1058 if (current->flags & PF_RANDOMIZE)
1059 randomize_brk(elf_brk);
1061 if (current->personality & MMAP_PAGE_ZERO) {
1062 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1063 and some applications "depend" upon this behavior.
1064 Since we do not have the power to recompile these, we
1065 emulate the SVr4 behavior. Sigh. */
/* NOTE(review): "&current" mojibake on the two lock lines below — the
 * intended calls are down_write/up_write(&current->mm->mmap_sem). */
1066 down_write(¤t->mm->mmap_sem);
1067 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1068 MAP_FIXED | MAP_PRIVATE, 0);
1069 up_write(¤t->mm->mmap_sem);
1072 #ifdef ELF_PLAT_INIT
1074 * The ABI may specify that certain registers be set up in special
1075 * ways (on i386 %edx is the address of a DT_FINI function, for
1076 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1077 * that the e_entry field is the address of the function descriptor
1078 * for the startup routine, rather than the address of the startup
1079 * routine itself. This macro performs whatever initialization to
1080 * the regs structure is required as well as any relocations to the
1081 * function descriptor entries when executing dynamically links apps.
1083 ELF_PLAT_INIT(regs, reloc_func_desc);
1086 start_thread(regs, elf_entry, bprm->p);
1087 if (unlikely(current->ptrace & PT_PTRACED)) {
1088 if (current->ptrace & PT_TRACE_EXEC)
1089 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1091 send_sig(SIGTRAP, current, 0);
/* Error-path cleanup (labels lost in this listing). */
1101 allow_write_access(interpreter);
1105 kfree(elf_interpreter);
1107 sys_close(elf_exec_fileno);
1110 put_files_struct(current->files);
1111 current->files = files;
1118 /* This is really simpleminded and specialized - we are loading an
1119 a.out library that is given an ELF header. */
/*
 * load_elf_library - uselib() handler: validate a tiny ET_EXEC image
 * (at most 2 program headers, exactly one PT_LOAD), mmap it at its
 * fixed address, and allocate/zero its bss.
 *
 * NOTE(review): corrupted listing — error-path returns, kfree, labels
 * and closing braces are missing, and "&current" is mangled into a
 * currency sign on the lock lines (intended:
 * down_write/up_write(&current->mm->mmap_sem)).  Code reproduced
 * verbatim; only comments added.
 */
1121 static int load_elf_library(struct file *file)
1123 struct elf_phdr *elf_phdata;
1124 struct elf_phdr *eppnt;
1125 unsigned long elf_bss, bss, len;
1126 int retval, error, i, j;
1127 struct elfhdr elf_ex;
1130 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1131 if (retval != sizeof(elf_ex))
1134 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1137 /* First of all, some simple consistency checks */
1138 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1139 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1142 /* Now read in all of the header information */
1144 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1145 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1148 elf_phdata = kmalloc(j, GFP_KERNEL);
1154 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
/* Require exactly one PT_LOAD segment, then seek to it. */
1158 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1159 if ((eppnt + i)->p_type == PT_LOAD)
1164 while (eppnt->p_type != PT_LOAD)
1167 /* Now use mmap to map the library into memory. */
1168 down_write(¤t->mm->mmap_sem);
1169 error = do_mmap(file,
1170 ELF_PAGESTART(eppnt->p_vaddr),
1172 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1173 PROT_READ | PROT_WRITE | PROT_EXEC,
1174 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1176 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1177 up_write(¤t->mm->mmap_sem);
1178 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1181 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1182 if (padzero(elf_bss)) {
/* Extend with anonymous zero pages to cover memsz beyond filesz. */
1187 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
1188 bss = eppnt->p_memsz + eppnt->p_vaddr;
1190 down_write(¤t->mm->mmap_sem);
1191 do_brk(len, bss - len);
1192 up_write(¤t->mm->mmap_sem);
1203 * Note that some platforms still use traditional core dumps and not
1204 * the ELF core dump. Each platform can select it as appropriate.
1206 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1211 * Modelled on fs/exec.c:aout_core_dump()
1212 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1215 * These are the only things you should do on a core-file: use only these
1216 * functions to write out all the necessary info.
1218 static int dump_write(struct file *file, const void *addr, int nr)
1220 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1223 static int dump_seek(struct file *file, loff_t off)
1225 if (file->f_op->llseek) {
1226 if (file->f_op->llseek(file, off, 0) != off)
1234 * Decide whether a segment is worth dumping; default is yes to be
1235 * sure (missing info is worse than too much; etc).
1236 * Personally I'd include everything, and use the coredump limit...
1238 * I think we should skip something. But I am not sure how. H.J.
1240 static int maydump(struct vm_area_struct *vma)
1242 /* Do not dump I/O mapped devices or special mappings */
1243 if (vma->vm_flags & (VM_IO | VM_RESERVED))
1246 if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
1249 /* Dump shared memory only if mapped from an anonymous file. */
1250 if (vma->vm_flags & VM_SHARED)
1251 return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
1253 /* If it hasn't been written to, don't write it out */
/* Round x up to the next multiple of y (y need not be a power of two). */
1260 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1262 /* An ELF note in memory */
/* NOTE(review): the "struct memelfnote { ... }" wrapper and its other
 * members (orig lines 1263-1266, 1268-1269) are missing from this
 * corrupted listing; only the datasz member survives below. */
1267 unsigned int datasz;
1271 static int notesize(struct memelfnote *en)
1275 sz = sizeof(struct elf_note);
1276 sz += roundup(strlen(en->name) + 1, 4);
1277 sz += roundup(en->datasz, 4);
/* Helpers for writenote(): write/seek the core file and bail out of
 * the enclosing function (return 0) on failure.  `file` is expected in
 * scope at the expansion site. */
1282 #define DUMP_WRITE(addr, nr) \
1283 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1284 #define DUMP_SEEK(off) \
1285 do { if (!dump_seek(file, (off))) return 0; } while(0)
/*
 * writenote() — emit one note to the core file: the fixed elf_note
 * header, then the NUL-terminated name, then the payload, 4-byte
 * aligning the file position between name and data and after data.
 * Returns 0 on any write/seek failure (via DUMP_WRITE/DUMP_SEEK).
 */
1287 static int writenote(struct memelfnote *men, struct file *file)
1291 en.n_namesz = strlen(men->name) + 1;
1292 en.n_descsz = men->datasz;
1293 en.n_type = men->type;
1295 DUMP_WRITE(&en, sizeof(en));
1296 DUMP_WRITE(men->name, en.n_namesz);
1297 /* XXX - cast from long long to long to avoid need for libgcc.a */
1298 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1299 DUMP_WRITE(men->data, men->datasz);
1300 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
/* Redefinitions used inside elf_core_dump(): these also charge each
 * write against the RLIMIT_CORE 'limit' via the running 'size' total
 * (the failure branch of each macro is on lines elided here). */
1307 #define DUMP_WRITE(addr, nr) \
1308 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1310 #define DUMP_SEEK(off) \
1311 if (!dump_seek(file, (off))) \
/*
 * fill_elf_header() — initialise an ELF executable header for an
 * ET_CORE file with 'segs' program header entries and no section
 * table.
 */
1314 static void fill_elf_header(struct elfhdr *elf, int segs)
1316 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1317 elf->e_ident[EI_CLASS] = ELF_CLASS;
1318 elf->e_ident[EI_DATA] = ELF_DATA;
1319 elf->e_ident[EI_VERSION] = EV_CURRENT;
1320 elf->e_ident[EI_OSABI] = ELF_OSABI;
1321 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1323 elf->e_type = ET_CORE;
1324 elf->e_machine = ELF_ARCH;
1325 elf->e_version = EV_CURRENT;
/* program headers immediately follow the ELF header */
1327 elf->e_phoff = sizeof(struct elfhdr);
1329 elf->e_flags = ELF_CORE_EFLAGS;
1330 elf->e_ehsize = sizeof(struct elfhdr);
1331 elf->e_phentsize = sizeof(struct elf_phdr);
1332 elf->e_phnum = segs;
/* core files carry no section table at all */
1333 elf->e_shentsize = 0;
1335 elf->e_shstrndx = 0;
/*
 * fill_elf_note_phdr() — describe the PT_NOTE segment: 'sz' bytes of
 * note data starting at file offset 'offset'.  (Zeroing of the
 * remaining phdr fields is on lines elided from this view.)
 */
1339 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1341 phdr->p_type = PT_NOTE;
1342 phdr->p_offset = offset;
1345 phdr->p_filesz = sz;
/*
 * fill_note() — initialise a memelfnote with the given name, note
 * type, and 'sz' bytes of payload at 'data'.  (The assignment body
 * is on lines elided from this view.)
 */
1352 static void fill_note(struct memelfnote *note, const char *name, int type,
1353 unsigned int sz, void *data)
1363 * fill up all the fields in prstatus from the given task struct, except registers
1364 * which need to be filled up separately.
1366 static void fill_prstatus(struct elf_prstatus *prstatus,
1367 struct task_struct *p, long signr)
1369 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
/* only the first word of each signal set fits in the prstatus ABI */
1370 prstatus->pr_sigpend = p->pending.signal.sig[0];
1371 prstatus->pr_sighold = p->blocked.sig[0];
1372 prstatus->pr_pid = p->pid;
1373 prstatus->pr_ppid = p->parent->pid;
1374 prstatus->pr_pgrp = process_group(p);
1375 prstatus->pr_sid = p->signal->session;
1376 if (thread_group_leader(p)) {
1378 * This is the record for the group leader. Add in the
1379 * cumulative times of previous dead threads. This total
1380 * won't include the time of each live thread whose state
1381 * is included in the core dump. The final total reported
1382 * to our parent process when it calls wait4 will include
1383 * those sums as well as the little bit more time it takes
1384 * this and each other thread to finish dying after the
1385 * core dump synchronization phase.
1387 cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
1388 &prstatus->pr_utime);
1389 cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
1390 &prstatus->pr_stime);
/* non-leader threads report their own times only */
1392 cputime_to_timeval(p->utime, &prstatus->pr_utime);
1393 cputime_to_timeval(p->stime, &prstatus->pr_stime);
/* times of reaped children, accumulated on the signal struct */
1395 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1396 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
/*
 * fill_psinfo() — populate the NT_PRPSINFO note payload for task 'p'
 * using argument data from 'mm'.  (Error-return lines, e.g. for the
 * copy_from_user failure path, are elided from this view.)
 */
1399 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1400 struct mm_struct *mm)
1402 unsigned int i, len;
1404 /* first copy the parameters from user space */
1405 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
/* clamp so a trailing NUL always fits in pr_psargs */
1407 len = mm->arg_end - mm->arg_start;
1408 if (len >= ELF_PRARGSZ)
1409 len = ELF_PRARGSZ-1;
1410 if (copy_from_user(&psinfo->pr_psargs,
1411 (const char __user *)mm->arg_start, len))
/* argv strings are NUL-separated in user memory; flatten to spaces */
1413 for(i = 0; i < len; i++)
1414 if (psinfo->pr_psargs[i] == 0)
1415 psinfo->pr_psargs[i] = ' ';
1416 psinfo->pr_psargs[len] = 0;
1418 psinfo->pr_pid = p->pid;
1419 psinfo->pr_ppid = p->parent->pid;
1420 psinfo->pr_pgrp = process_group(p);
1421 psinfo->pr_sid = p->signal->session;
/* map the lowest set state bit to an index into "RSDTZW" */
1423 i = p->state ? ffz(~p->state) + 1 : 0;
1424 psinfo->pr_state = i;
1425 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1426 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1427 psinfo->pr_nice = task_nice(p);
1428 psinfo->pr_flag = p->flags;
1429 SET_UID(psinfo->pr_uid, p->uid);
1430 SET_GID(psinfo->pr_gid, p->gid);
1431 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1436 /* Here is the structure in which status of each thread is captured. */
1437 struct elf_thread_status
1439 struct list_head list;
1440 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1441 elf_fpregset_t fpu; /* NT_PRFPREG */
1442 struct task_struct *thread;
1443 #ifdef ELF_CORE_COPY_XFPREGS
1444 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
/* at most 3 notes per thread: prstatus, fpu, optional xfpu */
1446 struct memelfnote notes[3];
1451 * In order to add the specific thread information for the elf file format,
1452 * we need to keep a linked list of every threads pr_status and then
1453 * create a single section for them in the final core file.
/*
 * elf_dump_thread_status() — fill t->notes[] for one non-current
 * thread and return the total on-disk size of those notes.  (The
 * num_notes bookkeeping and return are on lines elided from view.)
 */
1455 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1458 struct task_struct *p = t->thread;
1461 fill_prstatus(&t->prstatus, p, signr);
1462 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1464 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1466 sz += notesize(&t->notes[0]);
/* FPU note only when the thread actually has valid FPU state */
1468 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1469 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1471 sz += notesize(&t->notes[1]);
1474 #ifdef ELF_CORE_COPY_XFPREGS
1475 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1476 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1478 sz += notesize(&t->notes[2]);
1487 * This is a two-pass process; first we find the offsets of the bits,
1488 * and then they are actually written out. If we run out of core limit
/*
 * elf_core_dump() — write an ELF core file for the dying process to
 * 'file'.  'signr' is the fatal signal; 'regs' are the registers at
 * the time of the fault.  Pass 1 computes offsets while emitting the
 * ELF and program headers; pass 2 writes the notes and memory pages.
 * NOTE(review): many original lines are elided from this view; the
 * comments below describe only what is visible here.
 */
1491 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1499 struct vm_area_struct *vma;
1500 struct elfhdr *elf = NULL;
1501 off_t offset = 0, dataoff;
/* RLIMIT_CORE caps the total number of bytes we may write */
1502 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1504 struct memelfnote *notes = NULL;
1505 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1506 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1507 struct task_struct *g, *p;
1508 LIST_HEAD(thread_list);
1509 struct list_head *t;
1510 elf_fpregset_t *fpu = NULL;
1511 #ifdef ELF_CORE_COPY_XFPREGS
1512 elf_fpxregset_t *xfpu = NULL;
1514 int thread_status_size = 0;
1518 * We no longer stop all VM operations.
1520 * This is because those processes that could possibly change map_count or
1521 * the mmap / vma pages are now blocked in do_exit on current finishing
1524 * Only ptrace can touch these memory addresses, but it doesn't change
1525 * the map_count or the pages allocated. So no possibility of crashing
1526 * exists while dumping the mm->vm_next areas to the core file.
1529 /* alloc memory for large data structures: too large to be on stack */
1530 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1533 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1536 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1539 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1542 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1545 #ifdef ELF_CORE_COPY_XFPREGS
1546 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
/* Collect every other thread sharing this mm so each gets its own
 * per-thread notes in the dump.  GFP_ATOMIC: tasklist_lock is held. */
1552 struct elf_thread_status *tmp;
1553 read_lock(&tasklist_lock);
1555 if (current->mm == p->mm && current != p) {
1556 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
1558 read_unlock(&tasklist_lock);
1561 INIT_LIST_HEAD(&tmp->list);
1563 list_add(&tmp->list, &thread_list);
1565 while_each_thread(g,p);
1566 read_unlock(&tasklist_lock);
/* fill each collected thread's notes and total up their file size */
1567 list_for_each(t, &thread_list) {
1568 struct elf_thread_status *tmp;
1571 tmp = list_entry(t, struct elf_thread_status, list);
1572 sz = elf_dump_thread_status(signr, tmp);
1573 thread_status_size += sz;
1576 /* now collect the dump for the current */
1577 memset(prstatus, 0, sizeof(*prstatus));
1578 fill_prstatus(prstatus, current, signr);
1579 elf_core_copy_regs(&prstatus->pr_reg, regs);
1581 segs = current->mm->map_count;
1582 #ifdef ELF_CORE_EXTRA_PHDRS
1583 segs += ELF_CORE_EXTRA_PHDRS;
1587 fill_elf_header(elf, segs+1); /* including notes section */
1590 current->flags |= PF_DUMPCORE;
1593 * Set up the notes in similar form to SVR4 core dumps made
1594 * with info from their /proc.
1597 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1599 fill_psinfo(psinfo, current->group_leader, current->mm);
1600 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
/* dump the auxiliary vector saved at exec time, through AT_NULL */
1604 auxv = (elf_addr_t *) current->mm->saved_auxv;
1609 while (auxv[i - 2] != AT_NULL);
/* NOTE(review): "¬es" below is a mojibake of "&notes" (the "&not"
 * prefix of "&notes" was decoded as an HTML entity) — the source
 * encoding needs repair before this compiles. */
1610 fill_note(¬es[numnote++], "CORE", NT_AUXV,
1611 i * sizeof (elf_addr_t), auxv);
1613 /* Try to dump the FPU. */
1614 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1615 fill_note(notes + numnote++,
1616 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1617 #ifdef ELF_CORE_COPY_XFPREGS
1618 if (elf_core_copy_task_xfpregs(current, xfpu))
1619 fill_note(notes + numnote++,
1620 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
/* Pass 1: emit the ELF header, then account for all program headers */
1626 DUMP_WRITE(elf, sizeof(*elf));
1627 offset += sizeof(*elf); /* Elf header */
1628 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1630 /* Write notes phdr entry */
1632 struct elf_phdr phdr;
1635 for (i = 0; i < numnote; i++)
1636 sz += notesize(notes + i);
1638 sz += thread_status_size;
1640 fill_elf_note_phdr(&phdr, sz, offset);
1642 DUMP_WRITE(&phdr, sizeof(phdr));
1645 /* Page-align dumped data */
1646 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1648 /* Write program headers for segments dump */
1649 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1650 struct elf_phdr phdr;
1653 sz = vma->vm_end - vma->vm_start;
1655 phdr.p_type = PT_LOAD;
1656 phdr.p_offset = offset;
1657 phdr.p_vaddr = vma->vm_start;
/* skipped VMAs still get a phdr, just with zero bytes in the file */
1659 phdr.p_filesz = maydump(vma) ? sz : 0;
1661 offset += phdr.p_filesz;
1662 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1663 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1664 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1665 phdr.p_align = ELF_EXEC_PAGESIZE;
1667 DUMP_WRITE(&phdr, sizeof(phdr));
1670 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1671 ELF_CORE_WRITE_EXTRA_PHDRS;
1674 /* write out the notes section */
1675 for (i = 0; i < numnote; i++)
1676 if (!writenote(notes + i, file))
1679 /* write out the thread status notes section */
1680 list_for_each(t, &thread_list) {
1681 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1682 for (i = 0; i < tmp->num_notes; i++)
1683 if (!writenote(&tmp->notes[i], file))
/* Pass 2: dump the memory contents, one page at a time */
1689 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1695 for (addr = vma->vm_start;
1697 addr += PAGE_SIZE) {
1699 struct vm_area_struct *vma;
/* pin the page; on failure just skip ahead one page in the file */
1701 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1702 &page, &vma) <= 0) {
1703 DUMP_SEEK (file->f_pos + PAGE_SIZE);
/* all-zero pages become a hole in the file instead of data */
1705 if (page == ZERO_PAGE(addr)) {
1706 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1709 flush_cache_page(vma, addr, page_to_pfn(page));
1711 if ((size += PAGE_SIZE) > limit ||
1712 !dump_write(file, kaddr,
1715 page_cache_release(page);
1720 page_cache_release(page);
1725 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1726 ELF_CORE_WRITE_EXTRA_DATA;
/* sanity check: the actual file position must match our accounting */
1729 if ((off_t)file->f_pos != offset) {
1731 printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1732 (off_t)file->f_pos, offset);
/* cleanup: free the per-thread status list and scratch buffers */
1739 while (!list_empty(&thread_list)) {
1740 struct list_head *tmp = thread_list.next;
1742 kfree(list_entry(tmp, struct elf_thread_status, list));
1750 #ifdef ELF_CORE_COPY_XFPREGS
1757 #endif /* USE_ELF_CORE_DUMP */
/* Register the ELF binary-format handler at boot. */
1759 static int __init init_elf_binfmt(void)
1761 return register_binfmt(&elf_format);
/* Module unload: drop the ELF handler registration. */
1764 static void __exit exit_elf_binfmt(void)
1766 /* Remove the ELF loader. */
1767 unregister_binfmt(&elf_format);
/* core_initcall: register early in boot so ELF binaries (including
 * init) can be executed as soon as the VFS is up. */
1770 core_initcall(init_elf_binfmt);
1771 module_exit(exit_elf_binfmt);
1772 MODULE_LICENSE("GPL");