2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/vs_memory.h>
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
46 #include <linux/elf.h>
48 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
49 static int load_elf_library(struct file*);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #ifdef USE_ELF_CORE_DUMP
62 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
64 #define elf_core_dump NULL
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 # define ELF_MIN_ALIGN PAGE_SIZE
73 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
74 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
75 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
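/*
 * For illustration, assuming ELF_MIN_ALIGN is 0x1000 (4 KiB pages),
 * the three macros above behave as follows:
 *
 *   ELF_PAGESTART(0x08048123)  == 0x08048000   (round down to a page)
 *   ELF_PAGEOFFSET(0x08048123) == 0x00000123   (offset within the page)
 *   ELF_PAGEALIGN(0x08048123)  == 0x08049000   (round up to a page)
 */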
77 static struct linux_binfmt elf_format = {
78 .module = THIS_MODULE,
79 .load_binary = load_elf_binary,
80 .load_shlib = load_elf_library,
81 .core_dump = elf_core_dump,
82 .min_coredump = ELF_EXEC_PAGESIZE
85 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
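/*
 * Note that do_mmap() reports failure with small negative values which,
 * read as unsigned longs, are far above TASK_SIZE, so BAD_ADDR() catches
 * both genuinely out-of-range addresses and error returns such as
 * (unsigned long)-ENOMEM.
 */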
87 static int set_brk(unsigned long start, unsigned long end)
89 start = ELF_PAGEALIGN(start);
90 end = ELF_PAGEALIGN(end);
92 unsigned long addr = do_brk(start, end - start);
96 current->mm->start_brk = current->mm->brk = end;
101 /* We need to explicitly zero any fractional pages
102 after the data section (i.e. bss). This would
103 contain the junk from the file that should not
107 static void padzero(unsigned long elf_bss)
111 nbyte = ELF_PAGEOFFSET(elf_bss);
113 nbyte = ELF_MIN_ALIGN - nbyte;
114 clear_user((void __user *) elf_bss, nbyte);
118 /* Let's use some macros to make this stack manipulation a little clearer */
119 #ifdef CONFIG_STACK_GROWSUP
120 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
121 #define STACK_ROUND(sp, items) \
122 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
123 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
125 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
126 #define STACK_ROUND(sp, items) \
127 (((unsigned long) (sp - items)) &~ 15UL)
128 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
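/*
 * For illustration, on the common grows-down stack with a hypothetical
 * sp of 0xbffffe40:
 *
 *   STACK_ALLOC(sp, 16)  moves sp down to 0xbffffe30 and yields that new
 *                        value as the address of the allocation;
 *   STACK_ADD(sp, 4)     points four elf_addr_t slots below sp;
 *   STACK_ROUND(sp, 4)   masks the resulting address down to a 16-byte
 *                        boundary.
 *
 * On CONFIG_STACK_GROWSUP targets the same macros move upwards and
 * STACK_ROUND rounds up instead.
 */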
132 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
133 int interp_aout, unsigned long load_addr,
134 unsigned long interp_load_addr)
136 unsigned long p = bprm->p;
137 int argc = bprm->argc;
138 int envc = bprm->envc;
139 elf_addr_t __user *argv;
140 elf_addr_t __user *envp;
141 elf_addr_t __user *sp;
142 elf_addr_t __user *u_platform;
143 const char *k_platform = ELF_PLATFORM;
145 elf_addr_t *elf_info;
147 struct task_struct *tsk = current;
150 * If this architecture has a platform capability string, copy it
151 * to userspace. In some cases (Sparc), this info is impossible
152 * for userspace to get any other way, in others (i386) it is
158 size_t len = strlen(k_platform) + 1;
160 #ifdef __HAVE_ARCH_ALIGN_STACK
161 p = (unsigned long)arch_align_stack((unsigned long)p);
163 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
164 __copy_to_user(u_platform, k_platform, len);
167 /* Create the ELF interpreter info */
168 elf_info = (elf_addr_t *) current->mm->saved_auxv;
169 #define NEW_AUX_ENT(id, val) \
170 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
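/*
 * For illustration, after the NEW_AUX_ENT() calls below, saved_auxv holds
 * a flat array of (id, value) pairs terminated by AT_NULL; the values
 * here are hypothetical:
 *
 *   { AT_HWCAP,  0x0183f9ff,
 *     AT_PAGESZ, 0x1000,
 *     ...
 *     AT_ENTRY,  0x080482e0,
 *     AT_NULL,   0 }
 *
 * The array is later copied onto the new program's stack just above the
 * environment pointers, where the dynamic linker and libc read it.
 */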
174 * ARCH_DLINFO must come first so PPC can do its special alignment of
179 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
180 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
181 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
182 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
183 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
184 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
185 NEW_AUX_ENT(AT_BASE, interp_load_addr);
186 NEW_AUX_ENT(AT_FLAGS, 0);
187 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
188 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
189 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
190 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
191 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
192 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
194 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
196 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
197 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
200 /* AT_NULL is zero; clear the rest too */
201 memset(&elf_info[ei_index], 0,
202 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
204 /* And advance past the AT_NULL entry. */
207 sp = STACK_ADD(p, ei_index);
209 items = (argc + 1) + (envc + 1);
211 items += 3; /* a.out interpreters require argv & envp too */
213 items += 1; /* ELF interpreters only put argc on the stack */
215 bprm->p = STACK_ROUND(sp, items);
217 /* Point sp at the lowest address on the stack */
218 #ifdef CONFIG_STACK_GROWSUP
219 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
220 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
222 sp = (elf_addr_t __user *)bprm->p;
225 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
226 __put_user(argc, sp++);
229 envp = argv + argc + 1;
230 __put_user((elf_addr_t)(unsigned long)argv, sp++);
231 __put_user((elf_addr_t)(unsigned long)envp, sp++);
234 envp = argv + argc + 1;
237 /* Populate argv and envp */
238 p = current->mm->arg_start;
241 __put_user((elf_addr_t)p, argv++);
242 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
243 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
248 current->mm->arg_end = current->mm->env_start = p;
251 __put_user((elf_addr_t)p, envp++);
252 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
253 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
258 current->mm->env_end = p;
260 /* Put the elf_info on the stack in the right place. */
261 sp = (elf_addr_t __user *)envp + 1;
262 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
267 static unsigned long elf_map(struct file *filep, unsigned long addr,
268 struct elf_phdr *eppnt, int prot, int type,
269 unsigned long total_size)
271 unsigned long map_addr;
272 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
273 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
275 addr = ELF_PAGESTART(addr);
276 size = ELF_PAGEALIGN(size);
278 down_write(¤t->mm->mmap_sem);
281 * total_size is the size of the ELF (interpreter) image.
282 * The _first_ mmap needs to know the full size, otherwise
283 * randomization might put this image into an overlapping
284 * position with the ELF binary image. (since size < total_size)
285 * So we first map the 'big' image - and unmap the remainder at
286 * the end. (which unmap is needed for ELF images with holes.)
289 total_size = ELF_PAGEALIGN(total_size);
290 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
291 if (!BAD_ADDR(map_addr))
292 do_munmap(current->mm, map_addr+size, total_size-size);
294 map_addr = do_mmap(filep, addr, size, prot, type, off);
296 up_write(¤t->mm->mmap_sem);
301 #endif /* !elf_map */
303 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
305 int i, first_idx = -1, last_idx = -1;
307 for (i = 0; i < nr; i++)
308 if (cmds[i].p_type == PT_LOAD) {
317 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
318 ELF_PAGESTART(cmds[first_idx].p_vaddr);
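/*
 * For illustration, given two PT_LOAD entries with hypothetical values
 *
 *   [0] p_vaddr = 0x0000, p_memsz = 0x0800
 *   [1] p_vaddr = 0x2000, p_memsz = 0x0400
 *
 * total_mapping_size() returns 0x2000 + 0x0400 - ELF_PAGESTART(0x0000),
 * i.e. 0x2400: the span from the first loadable page to the end of the
 * last loadable segment, holes included.
 */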
321 /* This is much more generalized than the library routine read function,
322 so we keep this separate. Technically the library read function
323 is only provided so that we can read a.out libraries that have
326 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
327 struct file * interpreter,
328 unsigned long *interp_load_addr,
329 unsigned long no_base)
331 struct elf_phdr *elf_phdata;
332 struct elf_phdr *eppnt;
333 unsigned long load_addr = 0;
334 int load_addr_set = 0;
335 unsigned long last_bss = 0, elf_bss = 0;
336 unsigned long error = ~0UL;
337 unsigned long total_size;
340 /* First of all, some simple consistency checks */
341 if (interp_elf_ex->e_type != ET_EXEC &&
342 interp_elf_ex->e_type != ET_DYN)
344 if (!elf_check_arch(interp_elf_ex))
346 if (!interpreter->f_op || !interpreter->f_op->mmap)
350 * If the size of this structure has changed, then punt, since
351 * we will be doing the wrong thing.
353 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
355 if (interp_elf_ex->e_phnum < 1 ||
356 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
359 /* Now read in all of the header information */
361 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
362 if (size > ELF_MIN_ALIGN)
364 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
368 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
370 if (retval != size) {
376 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
381 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
382 if (eppnt->p_type == PT_LOAD) {
383 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
385 unsigned long vaddr = 0;
386 unsigned long k, map_addr;
388 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
389 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
390 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
391 vaddr = eppnt->p_vaddr;
392 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
393 elf_type |= MAP_FIXED;
394 else if (no_base && interp_elf_ex->e_type == ET_DYN)
397 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
400 if (BAD_ADDR(map_addr))
403 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
404 load_addr = map_addr - ELF_PAGESTART(vaddr);
409 * Check to see if the section's size will overflow the
410 * allowed task size. Note that p_filesz must always be
411 * <= p_memsz so it is only necessary to check p_memsz.
413 k = load_addr + eppnt->p_vaddr;
414 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
415 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
421 * Find the end of the file mapping for this phdr, and keep
422 * track of the largest address we see for this.
424 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
429 * Do the same thing for the memory mapping - between
430 * elf_bss and last_bss is the bss section.
432 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
439 * Now fill out the bss section. First pad the last page up
440 * to the page boundary, and then perform a mmap to make sure
441 * that there are zero-mapped pages up to and including the
445 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
447 /* Map the last of the bss segment */
448 if (last_bss > elf_bss) {
449 error = do_brk(elf_bss, last_bss - elf_bss);
454 *interp_load_addr = load_addr;
455 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
463 static unsigned long load_aout_interp(struct exec * interp_ex,
464 struct file * interpreter)
466 unsigned long text_data, elf_entry = ~0UL;
470 current->mm->end_code = interp_ex->a_text;
471 text_data = interp_ex->a_text + interp_ex->a_data;
472 current->mm->end_data = text_data;
473 current->mm->brk = interp_ex->a_bss + text_data;
475 switch (N_MAGIC(*interp_ex)) {
478 addr = (char __user *)0;
482 offset = N_TXTOFF(*interp_ex);
483 addr = (char __user *) N_TXTADDR(*interp_ex);
489 do_brk(0, text_data);
490 if (!interpreter->f_op || !interpreter->f_op->read)
492 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
494 flush_icache_range((unsigned long)addr,
495 (unsigned long)addr + text_data);
497 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
499 elf_entry = interp_ex->a_entry;
506 * These are the functions used to load ELF style executables and shared
507 * libraries. There is no binary dependent code anywhere else.
510 #define INTERPRETER_NONE 0
511 #define INTERPRETER_AOUT 1
512 #define INTERPRETER_ELF 2
515 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
517 struct file *interpreter = NULL; /* to shut gcc up */
518 unsigned long load_addr = 0, load_bias = 0;
519 int load_addr_set = 0;
520 char * elf_interpreter = NULL;
521 unsigned int interpreter_type = INTERPRETER_NONE;
522 unsigned char ibcs2_interpreter = 0;
524 struct elf_phdr * elf_ppnt, *elf_phdata;
525 unsigned long elf_bss, elf_brk;
529 unsigned long elf_entry, interp_load_addr = 0;
530 unsigned long start_code, end_code, start_data, end_data;
531 unsigned long reloc_func_desc = 0;
532 char passed_fileno[6];
533 struct files_struct *files;
534 int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
535 unsigned long def_flags = 0;
537 struct elfhdr elf_ex;
538 struct elfhdr interp_elf_ex;
539 struct exec interp_ex;
542 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
548 /* Get the exec-header */
549 loc->elf_ex = *((struct elfhdr *) bprm->buf);
552 /* First of all, some simple consistency checks */
553 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
556 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
558 if (!elf_check_arch(&loc->elf_ex))
560 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
563 /* Now read in all of the header information */
565 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
567 if (loc->elf_ex.e_phnum < 1 ||
568 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
570 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
572 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
576 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
577 if (retval != size) {
583 files = current->files; /* Refcounted so ok */
584 retval = unshare_files();
587 if (files == current->files) {
588 put_files_struct(files);
592 /* exec will make our files private anyway, but for the a.out
593 loader stuff we need to do it earlier */
595 retval = get_unused_fd();
598 get_file(bprm->file);
599 fd_install(elf_exec_fileno = retval, bprm->file);
601 elf_ppnt = elf_phdata;
610 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
611 if (elf_ppnt->p_type == PT_INTERP) {
612 /* This is the program interpreter used for
613 * shared libraries - for now assume that this
614 * is an a.out format binary
618 if (elf_ppnt->p_filesz > PATH_MAX ||
619 elf_ppnt->p_filesz < 2)
623 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
625 if (!elf_interpreter)
628 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
631 if (retval != elf_ppnt->p_filesz) {
634 goto out_free_interp;
636 /* make sure path is NULL terminated */
638 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
639 goto out_free_interp;
641 /* If the program interpreter is one of these two,
642 * then assume an iBCS2 image. Otherwise assume
643 * a native linux image.
645 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
646 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
647 ibcs2_interpreter = 1;
650 * The early SET_PERSONALITY here is so that the lookup
651 * for the interpreter happens in the namespace of the
652 * to-be-execed image. SET_PERSONALITY can select an
655 * However, SET_PERSONALITY is NOT allowed to switch
656 * this task into the new image's memory mapping
657 * policy - that is, TASK_SIZE must still evaluate to
658 * that which is appropriate to the execing application.
659 * This is because exit_mmap() needs to have TASK_SIZE
660 * evaluate to the size of the old image.
662 * So if (say) a 64-bit application is execing a 32-bit
663 * application it is the architecture's responsibility
664 * to defer changing the value of TASK_SIZE until the
665 * switch really is going to happen - do this in
666 * flush_thread(). - akpm
668 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
670 interpreter = open_exec(elf_interpreter);
671 retval = PTR_ERR(interpreter);
672 if (IS_ERR(interpreter))
673 goto out_free_interp;
674 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
675 if (retval != BINPRM_BUF_SIZE) {
678 goto out_free_dentry;
681 /* Get the exec headers */
682 loc->interp_ex = *((struct exec *) bprm->buf);
683 loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
689 elf_ppnt = elf_phdata;
690 executable_stack = EXSTACK_DEFAULT;
692 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
693 if (elf_ppnt->p_type == PT_GNU_STACK) {
694 if (elf_ppnt->p_flags & PF_X)
695 executable_stack = EXSTACK_ENABLE_X;
697 executable_stack = EXSTACK_DISABLE_X;
700 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
704 if (current->personality == PER_LINUX)
705 switch (exec_shield) {
707 if (executable_stack == EXSTACK_DISABLE_X) {
708 current->flags |= PF_RELOCEXEC;
709 relocexec = PF_RELOCEXEC;
714 executable_stack = EXSTACK_DISABLE_X;
715 current->flags |= PF_RELOCEXEC;
716 relocexec = PF_RELOCEXEC;
720 /* Some simple consistency checks for the interpreter */
721 if (elf_interpreter) {
722 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
724 /* Now figure out which format our binary is */
725 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
726 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
727 (N_MAGIC(loc->interp_ex) != QMAGIC))
728 interpreter_type = INTERPRETER_ELF;
730 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
731 interpreter_type &= ~INTERPRETER_ELF;
734 if (!interpreter_type)
735 goto out_free_dentry;
737 /* Make sure only one type was selected */
738 if ((interpreter_type & INTERPRETER_ELF) &&
739 interpreter_type != INTERPRETER_ELF) {
740 // FIXME - ratelimit this before re-enabling
741 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
742 interpreter_type = INTERPRETER_ELF;
744 /* Verify the interpreter has a valid arch */
745 if ((interpreter_type == INTERPRETER_ELF) &&
746 !elf_check_arch(&loc->interp_elf_ex))
747 goto out_free_dentry;
749 /* Executables without an interpreter also need a personality */
750 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
753 /* OK, we are done with that, now set up the arg stuff,
754 and then start this sucker up */
756 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
757 char *passed_p = passed_fileno;
758 sprintf(passed_fileno, "%d", elf_exec_fileno);
760 if (elf_interpreter) {
761 retval = copy_strings_kernel(1, &passed_p, bprm);
763 goto out_free_dentry;
768 /* Flush all traces of the currently running executable */
769 retval = flush_old_exec(bprm);
771 goto out_free_dentry;
772 current->flags |= relocexec;
776 * Turn off the CS limit completely if exec-shield disabled or
779 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
780 arch_add_exec_range(current->mm, -1);
783 /* Discard our unneeded old files struct */
786 put_files_struct(files);
790 /* OK, This is the point of no return */
791 current->mm->start_data = 0;
792 current->mm->end_data = 0;
793 current->mm->end_code = 0;
794 current->mm->mmap = NULL;
795 current->flags &= ~PF_FORKNOEXEC;
796 current->mm->def_flags = def_flags;
798 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
799 may depend on the personality. */
800 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
801 if (exec_shield != 2 &&
802 elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
803 current->personality |= READ_IMPLIES_EXEC;
805 arch_pick_mmap_layout(current->mm);
807 /* Do this so that we can load the interpreter, if need be. We will
808 change some of these later */
809 // current->mm->rss = 0;
810 vx_rsspages_sub(current->mm, current->mm->rss);
811 current->mm->free_area_cache = current->mm->mmap_base;
812 retval = setup_arg_pages(bprm, executable_stack);
814 send_sig(SIGKILL, current, 0);
815 goto out_free_dentry;
818 current->mm->start_stack = bprm->p;
821 /* Now we do a little grungy work by mmaping the ELF image into
822 the correct location in memory.
825 for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
826 int elf_prot = 0, elf_flags;
827 unsigned long k, vaddr;
829 if (elf_ppnt->p_type != PT_LOAD)
832 if (unlikely (elf_brk > elf_bss)) {
835 /* There was a PT_LOAD segment with p_memsz > p_filesz
836 before this one. Map anonymous pages, if needed,
837 and clear the area. */
838 retval = set_brk (elf_bss + load_bias,
839 elf_brk + load_bias);
841 send_sig(SIGKILL, current, 0);
842 goto out_free_dentry;
844 nbyte = ELF_PAGEOFFSET(elf_bss);
846 nbyte = ELF_MIN_ALIGN - nbyte;
847 if (nbyte > elf_brk - elf_bss)
848 nbyte = elf_brk - elf_bss;
849 clear_user((void __user *) elf_bss + load_bias, nbyte);
853 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
854 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
855 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
857 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
859 vaddr = elf_ppnt->p_vaddr;
860 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
861 elf_flags |= MAP_FIXED;
862 else if (loc->elf_ex.e_type == ET_DYN)
866 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
869 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
870 if (BAD_ADDR(error)) {
871 send_sig(SIGKILL, current, 0);
872 goto out_free_dentry;
875 if (!load_addr_set) {
877 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
878 if (loc->elf_ex.e_type == ET_DYN) {
880 ELF_PAGESTART(load_bias + vaddr);
881 load_addr += load_bias;
882 reloc_func_desc = load_bias;
885 k = elf_ppnt->p_vaddr;
886 if (k < start_code) start_code = k;
887 if (start_data < k) start_data = k;
890 * Check to see if the section's size will overflow the
891 * allowed task size. Note that p_filesz must always be
892 * <= p_memsz so it is only necessary to check p_memsz.
894 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
895 elf_ppnt->p_memsz > TASK_SIZE ||
896 TASK_SIZE - elf_ppnt->p_memsz < k) {
897 /* set_brk can never work. Avoid overflows. */
898 send_sig(SIGKILL, current, 0);
899 goto out_free_dentry;
902 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
906 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
910 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
915 loc->elf_ex.e_entry += load_bias;
916 elf_bss += load_bias;
917 elf_brk += load_bias;
918 start_code += load_bias;
919 end_code += load_bias;
920 start_data += load_bias;
921 end_data += load_bias;
923 /* Calling set_brk effectively mmaps the pages that we need
924 * for the bss and break sections. We must do this before
925 * mapping in the interpreter, to make sure it doesn't wind
926 * up getting placed where the bss needs to go.
928 retval = set_brk(elf_bss, elf_brk);
930 send_sig(SIGKILL, current, 0);
931 goto out_free_dentry;
935 if (elf_interpreter) {
936 if (interpreter_type == INTERPRETER_AOUT)
937 elf_entry = load_aout_interp(&loc->interp_ex,
940 elf_entry = load_elf_interp(&loc->interp_elf_ex,
944 if (BAD_ADDR(elf_entry)) {
945 printk(KERN_ERR "Unable to load interpreter %.128s\n",
947 force_sig(SIGSEGV, current);
948 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
949 goto out_free_dentry;
951 reloc_func_desc = interp_load_addr;
953 allow_write_access(interpreter);
955 kfree(elf_interpreter);
957 elf_entry = loc->elf_ex.e_entry;
962 if (interpreter_type != INTERPRETER_AOUT)
963 sys_close(elf_exec_fileno);
965 set_binfmt(&elf_format);
968 * Map the vsyscall trampoline. This address is then passed via
971 #ifdef __HAVE_ARCH_VSYSCALL
976 current->flags &= ~PF_FORKNOEXEC;
977 create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
978 load_addr, interp_load_addr);
979 /* N.B. passed_fileno might not be initialized? */
980 if (interpreter_type == INTERPRETER_AOUT)
981 current->mm->arg_start += strlen(passed_fileno) + 1;
982 current->mm->end_code = end_code;
983 current->mm->start_code = start_code;
984 current->mm->start_data = start_data;
985 current->mm->end_data = end_data;
986 current->mm->start_stack = bprm->p;
988 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
989 if (current->flags & PF_RELOCEXEC)
990 randomize_brk(elf_brk);
992 if (current->personality & MMAP_PAGE_ZERO) {
993 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
994 and some applications "depend" upon this behavior.
995 Since we do not have the power to recompile these, we
996 emulate the SVr4 behavior. Sigh. */
997 down_write(&current->mm->mmap_sem);
998 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
999 MAP_FIXED | MAP_PRIVATE, 0);
1000 up_write(&current->mm->mmap_sem);
1003 #ifdef ELF_PLAT_INIT
1005 * The ABI may specify that certain registers be set up in special
1006 * ways (on i386 %edx is the address of a DT_FINI function, for
1007 * example). In addition, it may also specify (eg, PowerPC64 ELF)
1008 * that the e_entry field is the address of the function descriptor
1009 * for the startup routine, rather than the address of the startup
1010 * routine itself. This macro performs whatever initialization to
1011 * the regs structure is required as well as any relocations to the
1012 * function descriptor entries when executing dynamically linked apps.
1014 ELF_PLAT_INIT(regs, reloc_func_desc);
1017 start_thread(regs, elf_entry, bprm->p);
1018 if (unlikely(current->ptrace & PT_PTRACED)) {
1019 if (current->ptrace & PT_TRACE_EXEC)
1020 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1022 send_sig(SIGTRAP, current, 0);
1032 allow_write_access(interpreter);
1036 if (elf_interpreter)
1037 kfree(elf_interpreter);
1039 sys_close(elf_exec_fileno);
1042 put_files_struct(current->files);
1043 current->files = files;
1047 current->flags &= ~PF_RELOCEXEC;
1048 current->flags |= old_relocexec;
1052 /* This is really simpleminded and specialized - we are loading an
1053 a.out library that is given an ELF header. */
1055 static int load_elf_library(struct file *file)
1057 struct elf_phdr *elf_phdata;
1058 struct elf_phdr *eppnt;
1059 unsigned long elf_bss, bss, len;
1060 int retval, error, i, j;
1061 struct elfhdr elf_ex;
1064 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1065 if (retval != sizeof(elf_ex))
1068 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1071 /* First of all, some simple consistency checks */
1072 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1073 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1076 /* Now read in all of the header information */
1078 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1079 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1082 elf_phdata = kmalloc(j, GFP_KERNEL);
1088 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1092 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1093 if ((eppnt + i)->p_type == PT_LOAD)
1098 while (eppnt->p_type != PT_LOAD)
1101 /* Now use mmap to map the library into memory. */
1102 down_write(&current->mm->mmap_sem);
1103 error = do_mmap(file,
1104 ELF_PAGESTART(eppnt->p_vaddr),
1106 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1107 PROT_READ | PROT_WRITE | PROT_EXEC,
1108 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1110 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1111 up_write(&current->mm->mmap_sem);
1112 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1115 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1118 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
1119 bss = eppnt->p_memsz + eppnt->p_vaddr;
1121 do_brk(len, bss - len);
1131 * Note that some platforms still use traditional core dumps and not
1132 * the ELF core dump. Each platform can select it as appropriate.
1134 #ifdef USE_ELF_CORE_DUMP
1139 * Modelled on fs/exec.c:aout_core_dump()
1140 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1143 * These are the only things you should do on a core-file: use only these
1144 * functions to write out all the necessary info.
1146 static int dump_write(struct file *file, const void *addr, int nr)
1148 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1151 static int dump_seek(struct file *file, off_t off)
1153 if (file->f_op->llseek) {
1154 if (file->f_op->llseek(file, off, 0) != off)
1162 * Decide whether a segment is worth dumping; default is yes to be
1163 * sure (missing info is worse than too much; etc).
1164 * Personally I'd include everything, and use the coredump limit...
1166 * I think we should skip something. But I am not sure how. H.J.
1168 static int maydump(struct vm_area_struct *vma)
1170 /* Do not dump I/O mapped devices, shared memory, or special mappings */
1171 if (vma->vm_flags & (VM_IO | VM_SHARED | VM_RESERVED))
1174 /* If it hasn't been written to, don't write it out */
1181 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
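/*
 * roundup(x, y) rounds x up to the next multiple of y, so for example
 * roundup(5, 4) == 8 and roundup(8, 4) == 8. It is used below to keep
 * note names and descriptors 4-byte aligned, as the ELF note format
 * requires.
 */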
1183 /* An ELF note in memory */
1188 unsigned int datasz;
1192 static int notesize(struct memelfnote *en)
1196 sz = sizeof(struct elf_note);
1197 sz += roundup(strlen(en->name) + 1, 4);
1198 sz += roundup(en->datasz, 4);
1203 #define DUMP_WRITE(addr, nr) \
1204 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1205 #define DUMP_SEEK(off) \
1206 do { if (!dump_seek(file, (off))) return 0; } while(0)
1208 static int writenote(struct memelfnote *men, struct file *file)
1212 en.n_namesz = strlen(men->name) + 1;
1213 en.n_descsz = men->datasz;
1214 en.n_type = men->type;
1216 DUMP_WRITE(&en, sizeof(en));
1217 DUMP_WRITE(men->name, en.n_namesz);
1218 /* XXX - cast from long long to long to avoid need for libgcc.a */
1219 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1220 DUMP_WRITE(men->data, men->datasz);
1221 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
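/*
 * For illustration, the bytes writenote() emits for a note named "CORE"
 * with a hypothetical 2-byte descriptor are laid out as:
 *
 *   struct elf_note (n_namesz = 5, n_descsz = 2, n_type)   12 bytes
 *   "CORE\0"                                                5 bytes
 *   padding to a 4-byte boundary                            3 bytes
 *   descriptor data                                          2 bytes
 *   padding to a 4-byte boundary                             2 bytes
 */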
1228 #define DUMP_WRITE(addr, nr) \
1229 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1231 #define DUMP_SEEK(off) \
1232 if (!dump_seek(file, (off))) \
1235 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1237 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1238 elf->e_ident[EI_CLASS] = ELF_CLASS;
1239 elf->e_ident[EI_DATA] = ELF_DATA;
1240 elf->e_ident[EI_VERSION] = EV_CURRENT;
1241 elf->e_ident[EI_OSABI] = ELF_OSABI;
1242 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1244 elf->e_type = ET_CORE;
1245 elf->e_machine = ELF_ARCH;
1246 elf->e_version = EV_CURRENT;
1248 elf->e_phoff = sizeof(struct elfhdr);
1251 elf->e_ehsize = sizeof(struct elfhdr);
1252 elf->e_phentsize = sizeof(struct elf_phdr);
1253 elf->e_phnum = segs;
1254 elf->e_shentsize = 0;
1256 elf->e_shstrndx = 0;
1260 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1262 phdr->p_type = PT_NOTE;
1263 phdr->p_offset = offset;
1266 phdr->p_filesz = sz;
1273 static void fill_note(struct memelfnote *note, const char *name, int type,
1274 unsigned int sz, void *data)
1284 * fill up all the fields in prstatus from the given task struct, except registers
1285 * which need to be filled up separately.
1287 static void fill_prstatus(struct elf_prstatus *prstatus,
1288 struct task_struct *p, long signr)
1290 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1291 prstatus->pr_sigpend = p->pending.signal.sig[0];
1292 prstatus->pr_sighold = p->blocked.sig[0];
1293 prstatus->pr_pid = p->pid;
1294 prstatus->pr_ppid = p->parent->pid;
1295 prstatus->pr_pgrp = process_group(p);
1296 prstatus->pr_sid = p->signal->session;
1297 if (p->pid == p->tgid) {
1299 * This is the record for the group leader. Add in the
1300 * cumulative times of previous dead threads. This total
1301 * won't include the time of each live thread whose state
1302 * is included in the core dump. The final total reported
1303 * to our parent process when it calls wait4 will include
1304 * those sums as well as the little bit more time it takes
1305 * this and each other thread to finish dying after the
1306 * core dump synchronization phase.
1308 jiffies_to_timeval(p->utime + p->signal->utime,
1309 &prstatus->pr_utime);
1310 jiffies_to_timeval(p->stime + p->signal->stime,
1311 &prstatus->pr_stime);
1313 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1314 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1316 jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1317 jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1320 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1321 struct mm_struct *mm)
1325 /* first copy the parameters from user space */
1326 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1328 len = mm->arg_end - mm->arg_start;
1329 if (len >= ELF_PRARGSZ)
1330 len = ELF_PRARGSZ-1;
1331 copy_from_user(&psinfo->pr_psargs,
1332 (const char __user *)mm->arg_start, len);
1333 for(i = 0; i < len; i++)
1334 if (psinfo->pr_psargs[i] == 0)
1335 psinfo->pr_psargs[i] = ' ';
1336 psinfo->pr_psargs[len] = 0;
1338 psinfo->pr_pid = p->pid;
1339 psinfo->pr_ppid = p->parent->pid;
1340 psinfo->pr_pgrp = process_group(p);
1341 psinfo->pr_sid = p->signal->session;
1343 i = p->state ? ffz(~p->state) + 1 : 0;
1344 psinfo->pr_state = i;
1345 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1346 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1347 psinfo->pr_nice = task_nice(p);
1348 psinfo->pr_flag = p->flags;
1349 SET_UID(psinfo->pr_uid, p->uid);
1350 SET_GID(psinfo->pr_gid, p->gid);
1351 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1356 /* Here is the structure in which status of each thread is captured. */
1357 struct elf_thread_status
1359 struct list_head list;
1360 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1361 elf_fpregset_t fpu; /* NT_PRFPREG */
1362 struct task_struct *thread;
1363 #ifdef ELF_CORE_COPY_XFPREGS
1364 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1366 struct memelfnote notes[3];
1371 * In order to add the specific thread information for the elf file format,
1372 * we need to keep a linked list of every threads pr_status and then
1373 * create a single section for them in the final core file.
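/*
 * For illustration, the NOTE segment of the resulting core file then
 * contains, in order: the dumping thread's NT_PRSTATUS, NT_PRPSINFO,
 * NT_TASKSTRUCT, NT_AUXV and (if valid) NT_PRFPREG notes, followed by
 * one NT_PRSTATUS (plus NT_PRFPREG, when available) group per extra
 * thread collected on the list kept by the code below.
 */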
1375 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1378 struct task_struct *p = t->thread;
1381 fill_prstatus(&t->prstatus, p, signr);
1382 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1384 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1386 sz += notesize(&t->notes[0]);
1388 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1389 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1391 sz += notesize(&t->notes[1]);
1394 #ifdef ELF_CORE_COPY_XFPREGS
1395 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1396 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1398 sz += notesize(&t->notes[2]);
1407 * This is a two-pass process; first we find the offsets of the bits,
1408 * and then they are actually written out. If we run out of core limit
1411 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1419 struct vm_area_struct *vma;
1420 struct elfhdr *elf = NULL;
1421 off_t offset = 0, dataoff;
1422 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1424 struct memelfnote *notes = NULL;
1425 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1426 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1427 struct task_struct *g, *p;
1428 LIST_HEAD(thread_list);
1429 struct list_head *t;
1430 elf_fpregset_t *fpu = NULL;
1431 #ifdef ELF_CORE_COPY_XFPREGS
1432 elf_fpxregset_t *xfpu = NULL;
1434 int thread_status_size = 0;
1438 * We no longer stop all VM operations.
1440 * This is because those processes that could possibly change map_count or
1441 * the mmap / vma pages are now blocked in do_exit on current finishing
1444 * Only ptrace can touch these memory addresses, but it doesn't change
1445 * the map_count or the pages allocated. So no possibility of crashing
1446 * exists while dumping the mm->vm_next areas to the core file.
1449 /* alloc memory for large data structures: too large to be on stack */
1450 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1453 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1456 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1459 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1462 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1465 #ifdef ELF_CORE_COPY_XFPREGS
1466 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1472 struct elf_thread_status *tmp;
1473 read_lock(&tasklist_lock);
1475 if (current->mm == p->mm && current != p) {
1476 tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
1478 read_unlock(&tasklist_lock);
1481 memset(tmp, 0, sizeof(*tmp));
1482 INIT_LIST_HEAD(&tmp->list);
1484 list_add(&tmp->list, &thread_list);
1486 while_each_thread(g,p);
1487 read_unlock(&tasklist_lock);
1488 list_for_each(t, &thread_list) {
1489 struct elf_thread_status *tmp;
1492 tmp = list_entry(t, struct elf_thread_status, list);
1493 sz = elf_dump_thread_status(signr, tmp);
1494 thread_status_size += sz;
1497 /* now collect the dump for the current */
1498 memset(prstatus, 0, sizeof(*prstatus));
1499 fill_prstatus(prstatus, current, signr);
1500 elf_core_copy_regs(&prstatus->pr_reg, regs);
1502 segs = current->mm->map_count;
1503 #ifdef ELF_CORE_EXTRA_PHDRS
1504 segs += ELF_CORE_EXTRA_PHDRS;
1508 fill_elf_header(elf, segs+1); /* including notes section */
1511 current->flags |= PF_DUMPCORE;
1514 * Set up the notes in similar form to SVR4 core dumps made
1515 * with info from their /proc.
1518 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1520 fill_psinfo(psinfo, current->group_leader, current->mm);
1521 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1523 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1527 auxv = (elf_addr_t *) current->mm->saved_auxv;
1532 while (auxv[i - 2] != AT_NULL);
1533 fill_note(¬es[numnote++], "CORE", NT_AUXV,
1534 i * sizeof (elf_addr_t), auxv);
1536 /* Try to dump the FPU. */
1537 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1538 fill_note(notes + numnote++,
1539 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1540 #ifdef ELF_CORE_COPY_XFPREGS
1541 if (elf_core_copy_task_xfpregs(current, xfpu))
1542 fill_note(notes + numnote++,
1543 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1549 DUMP_WRITE(elf, sizeof(*elf));
1550 offset += sizeof(*elf); /* Elf header */
1551 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1553 /* Write notes phdr entry */
1555 struct elf_phdr phdr;
1558 for (i = 0; i < numnote; i++)
1559 sz += notesize(notes + i);
1561 sz += thread_status_size;
1563 fill_elf_note_phdr(&phdr, sz, offset);
1565 DUMP_WRITE(&phdr, sizeof(phdr));
1568 /* Page-align dumped data */
1569 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
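/*
 * At this point the layout of the core file is fixed; with hypothetical
 * 32-bit offsets it looks like:
 *
 *   0x0000   ELF header
 *   0x0034   (segs + 1) program headers
 *   ......   PT_NOTE data (prstatus, psinfo, auxv, fpu, thread notes)
 *   dataoff  PT_LOAD contents, page aligned, one per dumped vma
 */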
1571 /* Write program headers for segments dump */
1572 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1573 struct elf_phdr phdr;
1576 sz = vma->vm_end - vma->vm_start;
1578 phdr.p_type = PT_LOAD;
1579 phdr.p_offset = offset;
1580 phdr.p_vaddr = vma->vm_start;
1582 phdr.p_filesz = maydump(vma) ? sz : 0;
1584 offset += phdr.p_filesz;
1585 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1586 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1587 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1588 phdr.p_align = ELF_EXEC_PAGESIZE;
1590 DUMP_WRITE(&phdr, sizeof(phdr));
1593 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1594 ELF_CORE_WRITE_EXTRA_PHDRS;
1597 /* write out the notes section */
1598 for (i = 0; i < numnote; i++)
1599 if (!writenote(notes + i, file))
1602 /* write out the thread status notes section */
1603 list_for_each(t, &thread_list) {
1604 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1605 for (i = 0; i < tmp->num_notes; i++)
1606 if (!writenote(&tmp->notes[i], file))
1612 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1618 for (addr = vma->vm_start;
1620 addr += PAGE_SIZE) {
1622 struct vm_area_struct *vma;
1624 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1625 &page, &vma) <= 0) {
1626 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1628 if (page == ZERO_PAGE(addr)) {
1629 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1632 flush_cache_page(vma, addr);
1634 if ((size += PAGE_SIZE) > limit ||
1635 !dump_write(file, kaddr,
1638 page_cache_release(page);
1643 page_cache_release(page);
1648 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1649 ELF_CORE_WRITE_EXTRA_DATA;
1652 if ((off_t) file->f_pos != offset) {
1654 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1655 (off_t) file->f_pos, offset);
1662 while(!list_empty(&thread_list)) {
1663 struct list_head *tmp = thread_list.next;
1665 kfree(list_entry(tmp, struct elf_thread_status, list));
1673 #ifdef ELF_CORE_COPY_XFPREGS
1680 #endif /* USE_ELF_CORE_DUMP */
1682 static int __init init_elf_binfmt(void)
1684 return register_binfmt(&elf_format);
1687 static void __exit exit_elf_binfmt(void)
1689 /* Remove the COFF and ELF loaders. */
1690 unregister_binfmt(&elf_format);
1693 core_initcall(init_elf_binfmt);
1694 module_exit(exit_elf_binfmt);
1695 MODULE_LICENSE("GPL");