/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/vs_base.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
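
/*
 * Worked example, assuming 4K pages (ELF_MIN_ALIGN == 0x1000):
 * for a segment with p_vaddr == 0x08048154,
 *	ELF_PAGESTART(0x08048154)  == 0x08048000  (page containing the start)
 *	ELF_PAGEOFFSET(0x08048154) == 0x154       (offset within that page)
 *	ELF_PAGEALIGN(0x08048154)  == 0x08049000  (rounded up to the next page)
 */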
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) >= PAGE_MASK)
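
/*
 * do_mmap() and friends return a small negative errno cast to an
 * unsigned long on failure, i.e. a value in the topmost page of the
 * address space; comparing against PAGE_MASK (~(PAGE_SIZE-1)) therefore
 * catches those error cookies as well as any other top-page address.
 */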
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
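
/*
 * Example, with ELF_MIN_ALIGN == 0x1000: if elf_bss == 0x0804a123,
 * ELF_PAGEOFFSET() gives 0x123, so clear_user() zeroes the remaining
 * 0x1000 - 0x123 == 0xedd bytes, up to the page boundary at 0x0804b000.
 */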
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
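
/*
 * The initial stack that create_elf_tables() below builds looks like
 * this on the usual grows-down architectures (low addresses first):
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[envc-1], NULL
 *	auxv: (AT_xxx, value) pairs, terminated by (AT_NULL, 0)
 *	padding, platform string, argument and environment strings
 */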
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * ARCH_DLINFO.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}
	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	down_write(&current->mm->mmap_sem);
	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);
	return map_addr;
}

#endif /* !elf_map */
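
/*
 * Illustration of the map-big-then-trim logic above, with made-up
 * numbers: for an interpreter whose PT_LOAD segments span total_size ==
 * 0x24000 bytes and whose first segment has size == 0x1d000, the first
 * call mmaps the full 0x24000 at map_addr and then munmaps
 * [map_addr+0x1d000, map_addr+0x24000), leaving a hole that later
 * MAP_FIXED mappings of the remaining segments can fill.
 */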
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}

	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
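
/*
 * Example with hypothetical program headers: a PT_LOAD at p_vaddr 0x0 /
 * p_memsz 0x1000 and a PT_LOAD at p_vaddr 0x5000 / p_memsz 0x800 give
 * 0x5000 + 0x800 - ELF_PAGESTART(0x0) == 0x5800 - enough address space
 * to cover both segments and the hole between them.
 */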
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;
	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
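
	/*
	 * e_phnum is a 16-bit field, so this bound simply keeps the
	 * program header table we kmalloc() below at or under 64KB.
	 */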
	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size)
		goto out_close;
	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}
			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}
	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	error = load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user *addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff		/* with 4K pages 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
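
/*
 * With the default STACK_RND_MASK of 0x7ff and 4K pages this shifts the
 * stack by up to 0x7ff << 12 == 0x7ff000 bytes (8MB - 4KB), in
 * page-sized steps.
 */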
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char *elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	executable_stack = EXSTACK_DEFAULT;

	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);

	if (current->personality == PER_LINUX && (exec_shield & 2)) {
		executable_stack = EXSTACK_DISABLE_X;
		current->flags |= PF_RANDOMIZE;
	}
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

#ifdef __i386__
	/*
	 * Turn off the CS limit completely if exec-shield disabled or
	 * NX active:
	 */
	if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
		arch_add_exec_range(current->mm, -1);
#endif

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (!(exec_shield & 2) &&
	    elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);
	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory.
	 */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}
		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
			elf_flags |= MAP_FIXED;
		else if (loc->elf_ex.e_type == ET_DYN)
			/* Try and get dynamic programs out of the way of the
			   default mmap base, as well as whatever program they
			   might try to exec.  This is because the brk will
			   follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
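		/*
		 * Example: on arches where ELF_ET_DYN_BASE is defined as
		 * (TASK_SIZE / 3 * 2) - i386 among them - a dynamic
		 * executable with a first p_vaddr of 0 gets load_bias ==
		 * 0x80000000 with a 3GB TASK_SIZE.
		 */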
		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;
		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_map_addr,
						    load_bias);
		if (!BAD_ADDR(elf_entry)) {
			/* load_elf_interp() returns relocation adjustment */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);
	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack,
					     start_code, interp_map_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;
#ifdef __HAVE_ARCH_RANDOMIZE_BRK
	if (current->flags & PF_RANDOMIZE)
		randomize_brk(elf_brk);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO.  */
		return 1;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
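
/*
 * Example: a "CORE"/NT_PRSTATUS note has a 12-byte struct elf_note
 * header, the name "CORE" padded from 5 to 8 bytes, and the prstatus
 * payload rounded up to a 4-byte multiple.
 */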
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
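
/*
 * On-disk layout produced above for each note:
 *
 *	struct elf_note { n_namesz; n_descsz; n_type; }
 *	name bytes (incl. NUL), padded to a 4-byte boundary
 *	desc bytes, padded to a 4-byte boundary
 */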
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
			  &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
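/*
 * The resulting core file is laid out as:
 *
 *	ELF header
 *	program headers (one PT_NOTE, one PT_LOAD per vma, plus extras)
 *	the notes themselves
 *	page-aligned memory contents of each dumped vma
 */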
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */
	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif
	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
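
	/*
	 * Aligning dataoff to ELF_EXEC_PAGESIZE means each dumped vma
	 * starts on a page boundary in the file, so debuggers can mmap
	 * the core's PT_LOAD segments directly.
	 */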
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}
	DUMP_SEEK(dataoff);

	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}
#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if ((off_t)file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING
		       "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t)file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");