2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/vs_memory.h>
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
46 #include <linux/elf.h>
48 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
49 static int load_elf_library(struct file*);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #ifdef USE_ELF_CORE_DUMP
62 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
64 #define elf_core_dump NULL
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 # define ELF_MIN_ALIGN PAGE_SIZE
73 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
74 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
75 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
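/*
 * A quick worked example, assuming ELF_MIN_ALIGN is 4096 (one 4 KiB page):
 *
 * ELF_PAGESTART(0x08048123)  == 0x08048000  (round down to the page base)
 * ELF_PAGEOFFSET(0x08048123) == 0x00000123  (offset within that page)
 * ELF_PAGEALIGN(0x08048123)  == 0x08049000  (round up to the next page)
 */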
77 static struct linux_binfmt elf_format = {
78 .module = THIS_MODULE,
79 .load_binary = load_elf_binary,
80 .load_shlib = load_elf_library,
81 .core_dump = elf_core_dump,
82 .min_coredump = ELF_EXEC_PAGESIZE
85 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
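/*
 * do_mmap()/do_brk() hand back either a userspace address or a small
 * negative errno cast to unsigned long; BAD_ADDR() is meant to catch both
 * the error values and any address beyond the user address space limit.
 */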
87 static int set_brk(unsigned long start, unsigned long end)
89 start = ELF_PAGEALIGN(start);
90 end = ELF_PAGEALIGN(end);
92 unsigned long addr = do_brk(start, end - start);
96 current->mm->start_brk = current->mm->brk = end;
101 /* We need to explicitly zero any fractional pages
102 after the data section (i.e. bss). This would
103 contain the junk from the file that should not
107 static void padzero(unsigned long elf_bss)
111 nbyte = ELF_PAGEOFFSET(elf_bss);
113 nbyte = ELF_MIN_ALIGN - nbyte;
114 clear_user((void __user *) elf_bss, nbyte);
118 /* Let's use some macros to make this stack manipulation a little clearer */
119 #ifdef CONFIG_STACK_GROWSUP
120 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
121 #define STACK_ROUND(sp, items) \
122 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
123 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
125 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
126 #define STACK_ROUND(sp, items) \
127 (((unsigned long) (sp - items)) &~ 15UL)
128 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
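/*
 * Either way STACK_ALLOC() reserves 'len' bytes and evaluates to the lowest
 * address of the reserved region: on a downward-growing stack that is the
 * freshly decremented sp, on upward growth (e.g. PA-RISC) it is the old sp
 * saved before the bump.
 */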
132 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
133 int interp_aout, unsigned long load_addr,
134 unsigned long interp_load_addr)
136 unsigned long p = bprm->p;
137 int argc = bprm->argc;
138 int envc = bprm->envc;
139 elf_addr_t __user *argv;
140 elf_addr_t __user *envp;
141 elf_addr_t __user *sp;
142 elf_addr_t __user *u_platform;
143 const char *k_platform = ELF_PLATFORM;
145 elf_addr_t *elf_info;
147 struct task_struct *tsk = current;
150 * If this architecture has a platform capability string, copy it
151 * to userspace. In some cases (Sparc), this info is impossible
152 * for userspace to get any other way, in others (i386) it is
158 size_t len = strlen(k_platform) + 1;
160 #ifdef __HAVE_ARCH_ALIGN_STACK
161 p = (unsigned long)arch_align_stack((unsigned long)p);
163 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
164 __copy_to_user(u_platform, k_platform, len);
167 /* Create the ELF interpreter info */
168 elf_info = (elf_addr_t *) current->mm->saved_auxv;
169 #define NEW_AUX_ENT(id, val) \
170 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
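/*
 * Each NEW_AUX_ENT() stores one { a_type, a_val } pair, i.e. two consecutive
 * elf_addr_t slots in saved_auxv; e.g. NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE)
 * records the page size the dynamic linker should assume. The whole array is
 * copied onto the new stack further down.
 */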
174 * ARCH_DLINFO must come first so PPC can do its special alignment of
179 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
180 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
181 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
182 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
183 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
184 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
185 NEW_AUX_ENT(AT_BASE, interp_load_addr);
186 NEW_AUX_ENT(AT_FLAGS, 0);
187 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
188 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
189 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
190 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
191 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
192 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
194 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
196 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
197 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
200 /* AT_NULL is zero; clear the rest too */
201 memset(&elf_info[ei_index], 0,
202 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
204 /* And advance past the AT_NULL entry. */
207 sp = STACK_ADD(p, ei_index);
209 items = (argc + 1) + (envc + 1);
211 items += 3; /* a.out interpreters require argv & envp too */
213 items += 1; /* ELF interpreters only put argc on the stack */
215 bprm->p = STACK_ROUND(sp, items);
217 /* Point sp at the lowest address on the stack */
218 #ifdef CONFIG_STACK_GROWSUP
219 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
220 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
222 sp = (elf_addr_t __user *)bprm->p;
225 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
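/*
 * A sketch of the resulting layout, lowest address first (the common ELF
 * interpreter case; architectures may add their own alignment on top):
 *
 * argc
 * argv[0] .. argv[argc-1], NULL   (pointers into the argument pages)
 * envp[0] .. envp[envc-1], NULL   (pointers into the environment pages)
 * auxv: { a_type, a_val } pairs, terminated by AT_NULL
 */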
226 __put_user(argc, sp++);
229 envp = argv + argc + 1;
230 __put_user((elf_addr_t)(unsigned long)argv, sp++);
231 __put_user((elf_addr_t)(unsigned long)envp, sp++);
234 envp = argv + argc + 1;
237 /* Populate argv and envp */
238 p = current->mm->arg_start;
241 __put_user((elf_addr_t)p, argv++);
242 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
243 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
248 current->mm->arg_end = current->mm->env_start = p;
251 __put_user((elf_addr_t)p, envp++);
252 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
253 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
258 current->mm->env_end = p;
260 /* Put the elf_info on the stack in the right place. */
261 sp = (elf_addr_t __user *)envp + 1;
262 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
267 static unsigned long elf_map(struct file *filep, unsigned long addr,
268 struct elf_phdr *eppnt, int prot, int type,
269 unsigned long total_size)
271 unsigned long map_addr;
272 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
273 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
275 addr = ELF_PAGESTART(addr);
276 size = ELF_PAGEALIGN(size);
278 down_write(&current->mm->mmap_sem);
281 * total_size is the size of the ELF (interpreter) image.
282 * The _first_ mmap needs to know the full size, otherwise
283 * randomization might put this image into an overlapping
284 * position with the ELF binary image. (since size < total_size)
285 * So we first map the 'big' image - and unmap the remainder at
286 * the end. (which unmap is needed for ELF images with holes.)
289 total_size = ELF_PAGEALIGN(total_size);
290 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
291 if (!BAD_ADDR(map_addr))
292 do_munmap(current->mm, map_addr+size, total_size-size);
294 map_addr = do_mmap(filep, addr, size, prot, type, off);
296 up_write(&current->mm->mmap_sem);
301 #endif /* !elf_map */
303 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
305 int i, first_idx = -1, last_idx = -1;
307 for (i = 0; i < nr; i++)
308 if (cmds[i].p_type == PT_LOAD) {
317 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
318 ELF_PAGESTART(cmds[first_idx].p_vaddr);
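/*
 * Example: if the first PT_LOAD starts at p_vaddr 0x08048000 and the last
 * one ends at p_vaddr + p_memsz == 0x08052a10, the value returned is
 * 0x08052a10 - ELF_PAGESTART(0x08048000) == 0xaa10, i.e. the full address
 * span the image will occupy once every segment is mapped.
 */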
321 /* This is much more generalized than the library routine read function,
322 so we keep this separate. Technically the library read function
323 is only provided so that we can read a.out libraries that have
326 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
327 struct file * interpreter,
328 unsigned long *interp_load_addr,
329 unsigned long no_base)
331 struct elf_phdr *elf_phdata;
332 struct elf_phdr *eppnt;
333 unsigned long load_addr = 0;
334 int load_addr_set = 0;
335 unsigned long last_bss = 0, elf_bss = 0;
336 unsigned long error = ~0UL;
337 unsigned long total_size;
340 /* First of all, some simple consistency checks */
341 if (interp_elf_ex->e_type != ET_EXEC &&
342 interp_elf_ex->e_type != ET_DYN)
344 if (!elf_check_arch(interp_elf_ex))
346 if (!interpreter->f_op || !interpreter->f_op->mmap)
350 * If the size of this structure has changed, then punt, since
351 * we will be doing the wrong thing.
353 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
355 if (interp_elf_ex->e_phnum < 1 ||
356 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
359 /* Now read in all of the header information */
361 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
362 if (size > ELF_MIN_ALIGN)
364 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
368 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
370 if (retval != size) {
376 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
381 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
382 if (eppnt->p_type == PT_LOAD) {
383 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
385 unsigned long vaddr = 0;
386 unsigned long k, map_addr;
388 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
389 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
390 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
391 vaddr = eppnt->p_vaddr;
392 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
393 elf_type |= MAP_FIXED;
394 else if (no_base && interp_elf_ex->e_type == ET_DYN)
397 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
400 if (BAD_ADDR(map_addr))
403 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
404 load_addr = map_addr - ELF_PAGESTART(vaddr);
409 * Check to see if the section's size will overflow the
410 * allowed task size. Note that p_filesz must always be
411 * <= p_memsz so it is only necessary to check p_memsz.
413 k = load_addr + eppnt->p_vaddr;
414 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
415 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
421 * Find the end of the file mapping for this phdr, and keep
422 * track of the largest address we see for this.
424 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
429 * Do the same thing for the memory mapping - between
430 * elf_bss and last_bss is the bss section.
432 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
439 * Now fill out the bss section. First pad the last page up
440 * to the page boundary, and then perform a mmap to make sure
441 * that there are zero-mapped pages up to and including the
445 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
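/*
 * Note that ELF_PAGESTART(x + ELF_MIN_ALIGN - 1) is just ELF_PAGEALIGN(x),
 * e.g. 0x40015c24 rounds up to 0x40016000 with 4 KiB pages, so elf_bss now
 * marks the first byte past everything mapped from the file.
 */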
447 /* Map the last of the bss segment */
448 if (last_bss > elf_bss) {
449 error = do_brk(elf_bss, last_bss - elf_bss);
454 *interp_load_addr = load_addr;
455 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
463 static unsigned long load_aout_interp(struct exec * interp_ex,
464 struct file * interpreter)
466 unsigned long text_data, elf_entry = ~0UL;
470 current->mm->end_code = interp_ex->a_text;
471 text_data = interp_ex->a_text + interp_ex->a_data;
472 current->mm->end_data = text_data;
473 current->mm->brk = interp_ex->a_bss + text_data;
475 switch (N_MAGIC(*interp_ex)) {
478 addr = (char __user *)0;
482 offset = N_TXTOFF(*interp_ex);
483 addr = (char __user *) N_TXTADDR(*interp_ex);
489 do_brk(0, text_data);
490 if (!interpreter->f_op || !interpreter->f_op->read)
492 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
494 flush_icache_range((unsigned long)addr,
495 (unsigned long)addr + text_data);
497 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
499 elf_entry = interp_ex->a_entry;
506 * These are the functions used to load ELF style executables and shared
507 * libraries. There is no binary dependent code anywhere else.
510 #define INTERPRETER_NONE 0
511 #define INTERPRETER_AOUT 1
512 #define INTERPRETER_ELF 2
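/*
 * interpreter_type starts out as INTERPRETER_ELF | INTERPRETER_AOUT and is
 * narrowed once the interpreter's magic numbers have been read; see the
 * consistency checks that follow the PT_INTERP scan below.
 */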
515 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
517 struct file *interpreter = NULL; /* to shut gcc up */
518 unsigned long load_addr = 0, load_bias = 0;
519 int load_addr_set = 0;
520 char * elf_interpreter = NULL;
521 unsigned int interpreter_type = INTERPRETER_NONE;
522 unsigned char ibcs2_interpreter = 0;
524 struct elf_phdr * elf_ppnt, *elf_phdata;
525 unsigned long elf_bss, elf_brk;
529 unsigned long elf_entry, interp_load_addr = 0;
530 unsigned long start_code, end_code, start_data, end_data;
531 unsigned long reloc_func_desc = 0;
532 char passed_fileno[6];
533 struct files_struct *files;
534 int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
535 unsigned long def_flags = 0;
537 struct elfhdr elf_ex;
538 struct elfhdr interp_elf_ex;
539 struct exec interp_ex;
542 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
548 /* Get the exec-header */
549 loc->elf_ex = *((struct elfhdr *) bprm->buf);
552 /* First of all, some simple consistency checks */
553 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
556 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
558 if (!elf_check_arch(&loc->elf_ex))
560 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
563 /* Now read in all of the header information */
565 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
567 if (loc->elf_ex.e_phnum < 1 ||
568 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
570 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
572 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
576 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
577 if (retval != size) {
583 files = current->files; /* Refcounted so ok */
584 retval = unshare_files();
587 if (files == current->files) {
588 put_files_struct(files);
592 /* exec will make our files private anyway, but for the a.out
593 loader stuff we need to do it earlier */
595 retval = get_unused_fd();
598 get_file(bprm->file);
599 fd_install(elf_exec_fileno = retval, bprm->file);
601 elf_ppnt = elf_phdata;
610 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
611 if (elf_ppnt->p_type == PT_INTERP) {
612 /* This is the program interpreter used for
613 * shared libraries - for now assume that this
614 * is an a.out format binary
618 if (elf_ppnt->p_filesz > PATH_MAX ||
619 elf_ppnt->p_filesz < 2)
623 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
625 if (!elf_interpreter)
628 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
631 if (retval != elf_ppnt->p_filesz) {
634 goto out_free_interp;
636 /* make sure path is NULL terminated */
638 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
639 goto out_free_interp;
641 /* If the program interpreter is one of these two,
642 * then assume an iBCS2 image. Otherwise assume
643 * a native linux image.
645 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
646 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
647 ibcs2_interpreter = 1;
650 * The early SET_PERSONALITY here is so that the lookup
651 * for the interpreter happens in the namespace of the
652 * to-be-execed image. SET_PERSONALITY can select an
655 * However, SET_PERSONALITY is NOT allowed to switch
656 * this task into the new image's memory mapping
657 * policy - that is, TASK_SIZE must still evaluate to
658 * that which is appropriate to the execing application.
659 * This is because exit_mmap() needs to have TASK_SIZE
660 * evaluate to the size of the old image.
662 * So if (say) a 64-bit application is execing a 32-bit
663 * application it is the architecture's responsibility
664 * to defer changing the value of TASK_SIZE until the
665 * switch really is going to happen - do this in
666 * flush_thread(). - akpm
668 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
670 interpreter = open_exec(elf_interpreter);
671 retval = PTR_ERR(interpreter);
672 if (IS_ERR(interpreter))
673 goto out_free_interp;
674 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
675 if (retval != BINPRM_BUF_SIZE) {
678 goto out_free_dentry;
681 /* Get the exec headers */
682 loc->interp_ex = *((struct exec *) bprm->buf);
683 loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
689 elf_ppnt = elf_phdata;
690 executable_stack = EXSTACK_DEFAULT;
692 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
693 if (elf_ppnt->p_type == PT_GNU_STACK) {
694 if (elf_ppnt->p_flags & PF_X)
695 executable_stack = EXSTACK_ENABLE_X;
697 executable_stack = EXSTACK_DISABLE_X;
700 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
704 if (current->personality == PER_LINUX)
705 switch (exec_shield) {
707 if (executable_stack == EXSTACK_DISABLE_X) {
708 current->flags |= PF_RELOCEXEC;
709 relocexec = PF_RELOCEXEC;
714 executable_stack = EXSTACK_DISABLE_X;
715 current->flags |= PF_RELOCEXEC;
716 relocexec = PF_RELOCEXEC;
720 /* Some simple consistency checks for the interpreter */
721 if (elf_interpreter) {
722 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
724 /* Now figure out which format our binary is */
725 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
726 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
727 (N_MAGIC(loc->interp_ex) != QMAGIC))
728 interpreter_type = INTERPRETER_ELF;
730 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
731 interpreter_type &= ~INTERPRETER_ELF;
734 if (!interpreter_type)
735 goto out_free_dentry;
737 /* Make sure only one type was selected */
738 if ((interpreter_type & INTERPRETER_ELF) &&
739 interpreter_type != INTERPRETER_ELF) {
740 // FIXME - ratelimit this before re-enabling
741 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
742 interpreter_type = INTERPRETER_ELF;
744 /* Verify the interpreter has a valid arch */
745 if ((interpreter_type == INTERPRETER_ELF) &&
746 !elf_check_arch(&loc->interp_elf_ex))
747 goto out_free_dentry;
749 /* Executables without an interpreter also need a personality */
750 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
753 /* OK, we are done with that, now set up the arg stuff,
754 and then start this sucker up */
756 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
757 char *passed_p = passed_fileno;
758 sprintf(passed_fileno, "%d", elf_exec_fileno);
760 if (elf_interpreter) {
761 retval = copy_strings_kernel(1, &passed_p, bprm);
763 goto out_free_dentry;
768 /* Flush all traces of the currently running executable */
769 retval = flush_old_exec(bprm);
771 goto out_free_dentry;
772 current->flags |= relocexec;
776 * Turn off the CS limit completely if exec-shield disabled or
779 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
780 arch_add_exec_range(current->mm, -1);
783 /* Discard our unneeded old files struct */
786 put_files_struct(files);
790 /* OK, This is the point of no return */
791 current->mm->start_data = 0;
792 current->mm->end_data = 0;
793 current->mm->end_code = 0;
794 current->mm->mmap = NULL;
795 current->flags &= ~PF_FORKNOEXEC;
796 current->mm->def_flags = def_flags;
798 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
799 may depend on the personality. */
800 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
801 if (exec_shield != 2 &&
802 elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
803 current->personality |= READ_IMPLIES_EXEC;
805 arch_pick_mmap_layout(current->mm);
807 /* Do this so that we can load the interpreter, if need be. We will
808 change some of these later */
809 // current->mm->rss = 0;
810 vx_rsspages_sub(current->mm, current->mm->rss);
811 current->mm->free_area_cache = current->mm->mmap_base;
812 retval = setup_arg_pages(bprm, executable_stack);
814 send_sig(SIGKILL, current, 0);
815 goto out_free_dentry;
818 current->mm->start_stack = bprm->p;
821 /* Now we do a little grungy work by mmaping the ELF image into
822 the correct location in memory.
825 for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
826 int elf_prot = 0, elf_flags;
827 unsigned long k, vaddr;
829 if (elf_ppnt->p_type != PT_LOAD)
832 if (unlikely (elf_brk > elf_bss)) {
835 /* There was a PT_LOAD segment with p_memsz > p_filesz
836 before this one. Map anonymous pages, if needed,
837 and clear the area. */
838 retval = set_brk (elf_bss + load_bias,
839 elf_brk + load_bias);
841 send_sig(SIGKILL, current, 0);
842 goto out_free_dentry;
844 nbyte = ELF_PAGEOFFSET(elf_bss);
846 nbyte = ELF_MIN_ALIGN - nbyte;
847 if (nbyte > elf_brk - elf_bss)
848 nbyte = elf_brk - elf_bss;
849 clear_user((void __user *) elf_bss + load_bias, nbyte);
853 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
854 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
855 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
857 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
859 vaddr = elf_ppnt->p_vaddr;
860 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
861 elf_flags |= MAP_FIXED;
862 else if (loc->elf_ex.e_type == ET_DYN)
866 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
869 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
870 if (BAD_ADDR(error)) {
871 send_sig(SIGKILL, current, 0);
872 goto out_free_dentry;
875 if (!load_addr_set) {
877 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
878 if (loc->elf_ex.e_type == ET_DYN) {
880 ELF_PAGESTART(load_bias + vaddr);
881 load_addr += load_bias;
882 reloc_func_desc = load_bias;
885 k = elf_ppnt->p_vaddr;
886 if (k < start_code) start_code = k;
887 if (start_data < k) start_data = k;
890 * Check to see if the section's size will overflow the
891 * allowed task size. Note that p_filesz must always be
892 * <= p_memsz so it is only necessary to check p_memsz.
894 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
895 elf_ppnt->p_memsz > TASK_SIZE ||
896 TASK_SIZE - elf_ppnt->p_memsz < k) {
897 /* set_brk can never work. Avoid overflows. */
898 send_sig(SIGKILL, current, 0);
899 goto out_free_dentry;
902 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
906 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
910 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
915 loc->elf_ex.e_entry += load_bias;
916 elf_bss += load_bias;
917 elf_brk += load_bias;
918 start_code += load_bias;
919 end_code += load_bias;
920 start_data += load_bias;
921 end_data += load_bias;
923 /* Calling set_brk effectively mmaps the pages that we need
924 * for the bss and break sections. We must do this before
925 * mapping in the interpreter, to make sure it doesn't wind
926 * up getting placed where the bss needs to go.
928 retval = set_brk(elf_bss, elf_brk);
930 send_sig(SIGKILL, current, 0);
931 goto out_free_dentry;
935 if (elf_interpreter) {
936 if (interpreter_type == INTERPRETER_AOUT)
937 elf_entry = load_aout_interp(&loc->interp_ex,
940 elf_entry = load_elf_interp(&loc->interp_elf_ex,
944 if (BAD_ADDR(elf_entry)) {
945 printk(KERN_ERR "Unable to load interpreter %.128s\n",
947 force_sig(SIGSEGV, current);
948 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
949 goto out_free_dentry;
951 reloc_func_desc = interp_load_addr;
953 allow_write_access(interpreter);
955 kfree(elf_interpreter);
957 elf_entry = loc->elf_ex.e_entry;
962 if (interpreter_type != INTERPRETER_AOUT)
963 sys_close(elf_exec_fileno);
965 set_binfmt(&elf_format);
968 * Map the vsyscall trampoline. This address is then passed via
971 #ifdef __HAVE_ARCH_VSYSCALL
976 current->flags &= ~PF_FORKNOEXEC;
977 create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
978 load_addr, interp_load_addr);
979 /* N.B. passed_fileno might not be initialized? */
980 if (interpreter_type == INTERPRETER_AOUT)
981 current->mm->arg_start += strlen(passed_fileno) + 1;
982 current->mm->end_code = end_code;
983 current->mm->start_code = start_code;
984 current->mm->start_data = start_data;
985 current->mm->end_data = end_data;
986 current->mm->start_stack = bprm->p;
988 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
989 if (current->flags & PF_RELOCEXEC)
990 randomize_brk(elf_brk);
992 if (current->personality & MMAP_PAGE_ZERO) {
993 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
994 and some applications "depend" upon this behavior.
995 Since we do not have the power to recompile these, we
996 emulate the SVr4 behavior. Sigh. */
997 down_write(&current->mm->mmap_sem);
998 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
999 MAP_FIXED | MAP_PRIVATE, 0);
1000 up_write(&current->mm->mmap_sem);
1003 #ifdef ELF_PLAT_INIT
1005 * The ABI may specify that certain registers be set up in special
1006 * ways (on i386 %edx is the address of a DT_FINI function, for
1007 * example). In addition, it may also specify (eg, PowerPC64 ELF)
1008 * that the e_entry field is the address of the function descriptor
1009 * for the startup routine, rather than the address of the startup
1010 * routine itself. This macro performs whatever initialization to
1011 * the regs structure is required as well as any relocations to the
1012 * function descriptor entries when executing dynamically linked apps.
1014 ELF_PLAT_INIT(regs, reloc_func_desc);
1017 start_thread(regs, elf_entry, bprm->p);
1018 if (unlikely(current->ptrace & PT_PTRACED)) {
1019 if (current->ptrace & PT_TRACE_EXEC)
1020 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1022 send_sig(SIGTRAP, current, 0);
1032 allow_write_access(interpreter);
1036 if (elf_interpreter)
1037 kfree(elf_interpreter);
1039 sys_close(elf_exec_fileno);
1042 put_files_struct(current->files);
1043 current->files = files;
1047 current->flags &= ~PF_RELOCEXEC;
1048 current->flags |= old_relocexec;
1052 /* This is really simpleminded and specialized - we are loading an
1053 a.out library that is given an ELF header. */
1055 static int load_elf_library(struct file *file)
1057 struct elf_phdr *elf_phdata;
1058 unsigned long elf_bss, bss, len;
1059 int retval, error, i, j;
1060 struct elfhdr elf_ex;
1063 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1064 if (retval != sizeof(elf_ex))
1067 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1070 /* First of all, some simple consistency checks */
1071 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1072 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1075 /* Now read in all of the header information */
1077 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1078 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1081 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1086 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1090 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1091 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1095 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1097 /* Now use mmap to map the library into memory. */
1098 down_write(&current->mm->mmap_sem);
1099 error = do_mmap(file,
1100 ELF_PAGESTART(elf_phdata->p_vaddr),
1101 (elf_phdata->p_filesz +
1102 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1103 PROT_READ | PROT_WRITE | PROT_EXEC,
1104 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1105 (elf_phdata->p_offset -
1106 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1107 up_write(&current->mm->mmap_sem);
1108 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1111 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1114 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1115 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1117 do_brk(len, bss - len);
1127 * Note that some platforms still use traditional core dumps and not
1128 * the ELF core dump. Each platform can select it as appropriate.
1130 #ifdef USE_ELF_CORE_DUMP
1135 * Modelled on fs/exec.c:aout_core_dump()
1136 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1139 * These are the only things you should do on a core-file: use only these
1140 * functions to write out all the necessary info.
1142 static int dump_write(struct file *file, const void *addr, int nr)
1144 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1147 static int dump_seek(struct file *file, off_t off)
1149 if (file->f_op->llseek) {
1150 if (file->f_op->llseek(file, off, 0) != off)
1158 * Decide whether a segment is worth dumping; default is yes to be
1159 * sure (missing info is worse than too much; etc).
1160 * Personally I'd include everything, and use the coredump limit...
1162 * I think we should skip something. But I am not sure how. H.J.
1164 static int maydump(struct vm_area_struct *vma)
1166 /* Do not dump I/O mapped devices, shared memory, or special mappings */
1167 if (vma->vm_flags & (VM_IO | VM_SHARED | VM_RESERVED))
1170 /* If it hasn't been written to, don't write it out */
1177 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
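/*
 * e.g. roundup(5, 4) == 8 and roundup(8, 4) == 8; used below to keep note
 * names and descriptors 4-byte aligned.
 */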
1179 /* An ELF note in memory */
1184 unsigned int datasz;
1188 static int notesize(struct memelfnote *en)
1192 sz = sizeof(struct elf_note);
1193 sz += roundup(strlen(en->name) + 1, 4);
1194 sz += roundup(en->datasz, 4);
1199 #define DUMP_WRITE(addr, nr) \
1200 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1201 #define DUMP_SEEK(off) \
1202 do { if (!dump_seek(file, (off))) return 0; } while(0)
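/*
 * writenote() emits one record in the standard ELF note layout: a
 * struct elf_note header (n_namesz, n_descsz, n_type), then the name string
 * padded to a 4-byte boundary, then the descriptor data, likewise padded.
 */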
1204 static int writenote(struct memelfnote *men, struct file *file)
1208 en.n_namesz = strlen(men->name) + 1;
1209 en.n_descsz = men->datasz;
1210 en.n_type = men->type;
1212 DUMP_WRITE(&en, sizeof(en));
1213 DUMP_WRITE(men->name, en.n_namesz);
1214 /* XXX - cast from long long to long to avoid need for libgcc.a */
1215 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1216 DUMP_WRITE(men->data, men->datasz);
1217 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1224 #define DUMP_WRITE(addr, nr) \
1225 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1227 #define DUMP_SEEK(off) \
1228 if (!dump_seek(file, (off))) \
1231 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1233 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1234 elf->e_ident[EI_CLASS] = ELF_CLASS;
1235 elf->e_ident[EI_DATA] = ELF_DATA;
1236 elf->e_ident[EI_VERSION] = EV_CURRENT;
1237 elf->e_ident[EI_OSABI] = ELF_OSABI;
1238 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1240 elf->e_type = ET_CORE;
1241 elf->e_machine = ELF_ARCH;
1242 elf->e_version = EV_CURRENT;
1244 elf->e_phoff = sizeof(struct elfhdr);
1247 elf->e_ehsize = sizeof(struct elfhdr);
1248 elf->e_phentsize = sizeof(struct elf_phdr);
1249 elf->e_phnum = segs;
1250 elf->e_shentsize = 0;
1252 elf->e_shstrndx = 0;
1256 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1258 phdr->p_type = PT_NOTE;
1259 phdr->p_offset = offset;
1262 phdr->p_filesz = sz;
1269 static void fill_note(struct memelfnote *note, const char *name, int type,
1270 unsigned int sz, void *data)
1280 * fill up all the fields in prstatus from the given task struct, except registers
1281 * which need to be filled up separately.
1283 static void fill_prstatus(struct elf_prstatus *prstatus,
1284 struct task_struct *p, long signr)
1286 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1287 prstatus->pr_sigpend = p->pending.signal.sig[0];
1288 prstatus->pr_sighold = p->blocked.sig[0];
1289 prstatus->pr_pid = p->pid;
1290 prstatus->pr_ppid = p->parent->pid;
1291 prstatus->pr_pgrp = process_group(p);
1292 prstatus->pr_sid = p->signal->session;
1293 if (p->pid == p->tgid) {
1295 * This is the record for the group leader. Add in the
1296 * cumulative times of previous dead threads. This total
1297 * won't include the time of each live thread whose state
1298 * is included in the core dump. The final total reported
1299 * to our parent process when it calls wait4 will include
1300 * those sums as well as the little bit more time it takes
1301 * this and each other thread to finish dying after the
1302 * core dump synchronization phase.
1304 jiffies_to_timeval(p->utime + p->signal->utime,
1305 &prstatus->pr_utime);
1306 jiffies_to_timeval(p->stime + p->signal->stime,
1307 &prstatus->pr_stime);
1309 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1310 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1312 jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1313 jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1316 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1317 struct mm_struct *mm)
1321 /* first copy the parameters from user space */
1322 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1324 len = mm->arg_end - mm->arg_start;
1325 if (len >= ELF_PRARGSZ)
1326 len = ELF_PRARGSZ-1;
1327 copy_from_user(&psinfo->pr_psargs,
1328 (const char __user *)mm->arg_start, len);
1329 for(i = 0; i < len; i++)
1330 if (psinfo->pr_psargs[i] == 0)
1331 psinfo->pr_psargs[i] = ' ';
1332 psinfo->pr_psargs[len] = 0;
1334 psinfo->pr_pid = p->pid;
1335 psinfo->pr_ppid = p->parent->pid;
1336 psinfo->pr_pgrp = process_group(p);
1337 psinfo->pr_sid = p->signal->session;
1339 i = p->state ? ffz(~p->state) + 1 : 0;
1340 psinfo->pr_state = i;
1341 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1342 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1343 psinfo->pr_nice = task_nice(p);
1344 psinfo->pr_flag = p->flags;
1345 SET_UID(psinfo->pr_uid, p->uid);
1346 SET_GID(psinfo->pr_gid, p->gid);
1347 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1352 /* Here is the structure in which status of each thread is captured. */
1353 struct elf_thread_status
1355 struct list_head list;
1356 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1357 elf_fpregset_t fpu; /* NT_PRFPREG */
1358 struct task_struct *thread;
1359 #ifdef ELF_CORE_COPY_XFPREGS
1360 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1362 struct memelfnote notes[3];
1367 * In order to add the specific thread information for the elf file format,
1368 * we need to keep a linked list of every thread's pr_status and then
1369 * create a single section for them in the final core file.
1371 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1374 struct task_struct *p = t->thread;
1377 fill_prstatus(&t->prstatus, p, signr);
1378 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1380 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1382 sz += notesize(&t->notes[0]);
1384 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1385 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1387 sz += notesize(&t->notes[1]);
1390 #ifdef ELF_CORE_COPY_XFPREGS
1391 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1392 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1394 sz += notesize(&t->notes[2]);
1403 * This is a two-pass process; first we find the offsets of the bits,
1404 * and then they are actually written out. If we run out of core limit
1407 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1415 struct vm_area_struct *vma;
1416 struct elfhdr *elf = NULL;
1417 off_t offset = 0, dataoff;
1418 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1420 struct memelfnote *notes = NULL;
1421 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1422 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1423 struct task_struct *g, *p;
1424 LIST_HEAD(thread_list);
1425 struct list_head *t;
1426 elf_fpregset_t *fpu = NULL;
1427 #ifdef ELF_CORE_COPY_XFPREGS
1428 elf_fpxregset_t *xfpu = NULL;
1430 int thread_status_size = 0;
1434 * We no longer stop all VM operations.
1436 * This is because those processes that could possibly change map_count or
1437 * the mmap / vma pages are now blocked in do_exit on current finishing
1440 * Only ptrace can touch these memory addresses, but it doesn't change
1441 * the map_count or the pages allocated. So no possibility of crashing
1442 * exists while dumping the mm->vm_next areas to the core file.
1445 /* alloc memory for large data structures: too large to be on stack */
1446 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1449 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1452 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1455 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1458 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1461 #ifdef ELF_CORE_COPY_XFPREGS
1462 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1468 struct elf_thread_status *tmp;
1469 read_lock(&tasklist_lock);
1471 if (current->mm == p->mm && current != p) {
1472 tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
1474 read_unlock(&tasklist_lock);
1477 memset(tmp, 0, sizeof(*tmp));
1478 INIT_LIST_HEAD(&tmp->list);
1480 list_add(&tmp->list, &thread_list);
1482 while_each_thread(g,p);
1483 read_unlock(&tasklist_lock);
1484 list_for_each(t, &thread_list) {
1485 struct elf_thread_status *tmp;
1488 tmp = list_entry(t, struct elf_thread_status, list);
1489 sz = elf_dump_thread_status(signr, tmp);
1490 thread_status_size += sz;
1493 /* now collect the dump for the current */
1494 memset(prstatus, 0, sizeof(*prstatus));
1495 fill_prstatus(prstatus, current, signr);
1496 elf_core_copy_regs(&prstatus->pr_reg, regs);
1498 segs = current->mm->map_count;
1499 #ifdef ELF_CORE_EXTRA_PHDRS
1500 segs += ELF_CORE_EXTRA_PHDRS;
1504 fill_elf_header(elf, segs+1); /* including notes section */
1507 current->flags |= PF_DUMPCORE;
1510 * Set up the notes in similar form to SVR4 core dumps made
1511 * with info from their /proc.
1514 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1516 fill_psinfo(psinfo, current->group_leader, current->mm);
1517 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1519 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1523 auxv = (elf_addr_t *) current->mm->saved_auxv;
1528 while (auxv[i - 2] != AT_NULL);
1529 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1530 i * sizeof (elf_addr_t), auxv);
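/*
 * The loop above walks current->mm->saved_auxv up to and including the
 * terminating AT_NULL pair, so the NT_AUXV note mirrors exactly what
 * create_elf_tables() handed to the process at exec time.
 */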
1532 /* Try to dump the FPU. */
1533 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1534 fill_note(notes + numnote++,
1535 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1536 #ifdef ELF_CORE_COPY_XFPREGS
1537 if (elf_core_copy_task_xfpregs(current, xfpu))
1538 fill_note(notes + numnote++,
1539 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1545 DUMP_WRITE(elf, sizeof(*elf));
1546 offset += sizeof(*elf); /* Elf header */
1547 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1549 /* Write notes phdr entry */
1551 struct elf_phdr phdr;
1554 for (i = 0; i < numnote; i++)
1555 sz += notesize(notes + i);
1557 sz += thread_status_size;
1559 fill_elf_note_phdr(&phdr, sz, offset);
1561 DUMP_WRITE(&phdr, sizeof(phdr));
1564 /* Page-align dumped data */
1565 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1567 /* Write program headers for segments dump */
1568 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1569 struct elf_phdr phdr;
1572 sz = vma->vm_end - vma->vm_start;
1574 phdr.p_type = PT_LOAD;
1575 phdr.p_offset = offset;
1576 phdr.p_vaddr = vma->vm_start;
1578 phdr.p_filesz = maydump(vma) ? sz : 0;
1580 offset += phdr.p_filesz;
1581 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1582 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1583 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1584 phdr.p_align = ELF_EXEC_PAGESIZE;
1586 DUMP_WRITE(&phdr, sizeof(phdr));
1589 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1590 ELF_CORE_WRITE_EXTRA_PHDRS;
1593 /* write out the notes section */
1594 for (i = 0; i < numnote; i++)
1595 if (!writenote(notes + i, file))
1598 /* write out the thread status notes section */
1599 list_for_each(t, &thread_list) {
1600 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1601 for (i = 0; i < tmp->num_notes; i++)
1602 if (!writenote(&tmp->notes[i], file))
1608 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1614 for (addr = vma->vm_start;
1616 addr += PAGE_SIZE) {
1618 struct vm_area_struct *vma;
1620 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1621 &page, &vma) <= 0) {
1622 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1624 if (page == ZERO_PAGE(addr)) {
1625 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1628 flush_cache_page(vma, addr);
1630 if ((size += PAGE_SIZE) > limit ||
1631 !dump_write(file, kaddr,
1634 page_cache_release(page);
1639 page_cache_release(page);
1644 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1645 ELF_CORE_WRITE_EXTRA_DATA;
1648 if ((off_t) file->f_pos != offset) {
1650 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1651 (off_t) file->f_pos, offset);
1658 while(!list_empty(&thread_list)) {
1659 struct list_head *tmp = thread_list.next;
1661 kfree(list_entry(tmp, struct elf_thread_status, list));
1669 #ifdef ELF_CORE_COPY_XFPREGS
1676 #endif /* USE_ELF_CORE_DUMP */
1678 static int __init init_elf_binfmt(void)
1680 return register_binfmt(&elf_format);
1683 static void __exit exit_elf_binfmt(void)
1685 /* Remove the ELF loader. */
1686 unregister_binfmt(&elf_format);
1689 core_initcall(init_elf_binfmt);
1690 module_exit(exit_elf_binfmt);
1691 MODULE_LICENSE("GPL");