2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/vs_memory.h>
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
46 #include <linux/elf.h>
/* Forward declarations for the binfmt handler entry points below. */
48 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
49 static int load_elf_library(struct file*);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Auxv entries are stored as native-word-sized values. */
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #ifdef USE_ELF_CORE_DUMP
62 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
64 #define elf_core_dump NULL
/*
 * ELF_MIN_ALIGN is the mapping granularity used for ELF segments: the
 * larger of the ABI's ELF_EXEC_PAGESIZE and the hardware PAGE_SIZE.
 */
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 # define ELF_MIN_ALIGN PAGE_SIZE
/* Round down / take offset within / round up to ELF_MIN_ALIGN. */
73 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
74 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
75 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/*
 * Registration record hooking this loader into the binfmt machinery.
 * NOTE(review): the closing "};" (original line 83) is elided in this excerpt.
 */
77 static struct linux_binfmt elf_format = {
78 .module = THIS_MODULE,
79 .load_binary = load_elf_binary,
80 .load_shlib = load_elf_library,
81 .core_dump = elf_core_dump,
82 .min_coredump = ELF_EXEC_PAGESIZE
/* An address above TASK_SIZE also covers the negative error codes from mmap. */
85 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
/*
 * set_brk - extend the brk region with anonymous pages covering [start, end)
 * and record the new program break in current->mm.
 * NOTE(review): braces, the do_brk error check and the return statement are
 * elided in this excerpt (original lines 88, 91, 93-95, 97-98).
 */
87 static int set_brk(unsigned long start, unsigned long end)
89 start = ELF_PAGEALIGN(start);
90 end = ELF_PAGEALIGN(end);
92 unsigned long addr = do_brk(start, end - start);
96 current->mm->start_brk = current->mm->brk = end;
101 /* We need to explicitly zero any fractional pages
102 after the data section (i.e. bss). This would
103 contain the junk from the file that should not
/*
 * padzero - zero the tail of the page containing elf_bss so that stale file
 * bytes beyond p_filesz are not visible to the new image.
 * NOTE(review): the "if (nbyte)" guard (original line 112) is elided here.
 */
107 static void padzero(unsigned long elf_bss)
111 nbyte = ELF_PAGEOFFSET(elf_bss);
113 nbyte = ELF_MIN_ALIGN - nbyte;
114 clear_user((void __user *) elf_bss, nbyte);
118 /* Let's use some macros to make this stack manipulation a little clearer */
/*
 * Stack helpers: on grows-up stacks (PARISC) allocation advances upward and
 * STACK_ALLOC returns the old sp; on conventional grows-down stacks it
 * decrements sp. STACK_ROUND keeps the final sp 16-byte aligned.
 */
119 #ifdef CONFIG_STACK_GROWSUP
120 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
121 #define STACK_ROUND(sp, items) \
122 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
123 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
125 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
126 #define STACK_ROUND(sp, items) \
127 (((unsigned long) (sp - items)) &~ 15UL)
128 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
/*
 * create_elf_tables - lay out argc/argv/envp and the ELF auxiliary vector on
 * the new process stack. load_addr is where the main image's phdrs live;
 * interp_load_addr becomes AT_BASE; interp_aout selects the a.out-style
 * layout (argv/envp pointers pushed explicitly).
 * NOTE(review): many intervening lines (declarations of ei_index/items/len,
 * #else/#endif branches, loop headers, braces) are elided in this excerpt.
 */
132 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
133 int interp_aout, unsigned long load_addr,
134 unsigned long interp_load_addr)
136 unsigned long p = bprm->p;
137 int argc = bprm->argc;
138 int envc = bprm->envc;
139 elf_addr_t __user *argv;
140 elf_addr_t __user *envp;
141 elf_addr_t __user *sp;
142 elf_addr_t __user *u_platform;
143 const char *k_platform = ELF_PLATFORM;
145 elf_addr_t *elf_info;
147 struct task_struct *tsk = current;
150 * If this architecture has a platform capability string, copy it
151 * to userspace. In some cases (Sparc), this info is impossible
152 * for userspace to get any other way, in others (i386) it is
158 size_t len = strlen(k_platform) + 1;
160 #ifdef __HAVE_ARCH_ALIGN_STACK
161 p = (unsigned long)arch_align_stack((unsigned long)p);
163 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
164 __copy_to_user(u_platform, k_platform, len);
167 /* Create the ELF interpreter info */
168 elf_info = (elf_addr_t *) current->mm->saved_auxv;
169 #define NEW_AUX_ENT(id, val) \
170 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
174 * ARCH_DLINFO must come first so PPC can do its special alignment of
179 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
180 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
181 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
182 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
183 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
184 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
185 NEW_AUX_ENT(AT_BASE, interp_load_addr);
186 NEW_AUX_ENT(AT_FLAGS, 0);
187 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
188 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
189 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
190 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
191 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
192 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
194 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
196 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
197 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
200 /* AT_NULL is zero; clear the rest too */
201 memset(&elf_info[ei_index], 0,
202 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
204 /* And advance past the AT_NULL entry. */
207 sp = STACK_ADD(p, ei_index);
209 items = (argc + 1) + (envc + 1);
211 items += 3; /* a.out interpreters require argv & envp too */
213 items += 1; /* ELF interpreters only put argc on the stack */
215 bprm->p = STACK_ROUND(sp, items);
217 /* Point sp at the lowest address on the stack */
218 #ifdef CONFIG_STACK_GROWSUP
219 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
220 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
222 sp = (elf_addr_t __user *)bprm->p;
225 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
226 __put_user(argc, sp++);
229 envp = argv + argc + 1;
230 __put_user((elf_addr_t)(unsigned long)argv, sp++);
231 __put_user((elf_addr_t)(unsigned long)envp, sp++);
234 envp = argv + argc + 1;
237 /* Populate argv and envp */
238 p = current->mm->arg_start;
241 __put_user((elf_addr_t)p, argv++);
242 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
243 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
248 current->mm->arg_end = current->mm->env_start = p;
251 __put_user((elf_addr_t)p, envp++);
252 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
253 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
258 current->mm->env_end = p;
260 /* Put the elf_info on the stack in the right place. */
261 sp = (elf_addr_t __user *)envp + 1;
262 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
/*
 * elf_map - mmap one PT_LOAD segment of an ELF file at (page-rounded) addr.
 * total_size, when non-zero, is the full span of the image: the first mmap
 * reserves the whole span so randomization cannot interleave other mappings,
 * then the excess beyond this segment is unmapped again.
 * Returns the mapped address, or an error value caught by BAD_ADDR().
 * FIX(review): "&current" had been corrupted to the HTML entity "&curren;"
 * ("¤t") in the mmap_sem lock calls below; restored to "&current".
 * NOTE(review): braces and the if/else around the two do_mmap paths are
 * elided in this excerpt (original lines 270, 274, 277-280, 287-288, 293,
 * 295, 297-300).
 */
267 static unsigned long elf_map(struct file *filep, unsigned long addr,
268 struct elf_phdr *eppnt, int prot, int type,
269 unsigned long total_size)
271 unsigned long map_addr;
272 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
273 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
275 addr = ELF_PAGESTART(addr);
276 size = ELF_PAGEALIGN(size);
278 down_write(&current->mm->mmap_sem);
281 * total_size is the size of the ELF (interpreter) image.
282 * The _first_ mmap needs to know the full size, otherwise
283 * randomization might put this image into an overlapping
284 * position with the ELF binary image. (since size < total_size)
285 * So we first map the 'big' image - and unmap the remainder at
286 * the end. (which unmap is needed for ELF images with holes.)
289 total_size = ELF_PAGEALIGN(total_size);
290 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
291 if (!BAD_ADDR(map_addr))
292 do_munmap(current->mm, map_addr+size, total_size-size);
294 map_addr = do_mmap(filep, addr, size, prot, type, off);
296 up_write(&current->mm->mmap_sem);
301 #endif /* !elf_map */
/*
 * total_mapping_size - span in bytes from the page-start of the first
 * PT_LOAD segment to the end (p_vaddr + p_memsz) of the last one.
 * NOTE(review): the first_idx/last_idx bookkeeping and the nothing-found
 * early return (original lines 309-316) are elided in this excerpt.
 */
303 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
305 int i, first_idx = -1, last_idx = -1;
307 for (i = 0; i < nr; i++)
308 if (cmds[i].p_type == PT_LOAD) {
317 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
318 ELF_PAGESTART(cmds[first_idx].p_vaddr);
321 /* This is much more generalized than the library routine read function,
322 so we keep this separate. Technically the library read function
323 is only provided so that we can read a.out libraries that have
/*
 * load_elf_interp - map the ELF program interpreter (dynamic linker) into
 * the current address space. On success stores the interpreter's load base
 * in *interp_load_addr and returns its (relocated) entry point; on failure
 * returns an error value caught by BAD_ADDR(). no_base, when non-zero, is
 * the preferred base for an ET_DYN interpreter.
 * NOTE(review): error-path labels, several goto targets, braces and
 * declarations (retval, size, i) are elided in this excerpt.
 */
326 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
327 struct file * interpreter,
328 unsigned long *interp_load_addr,
329 unsigned long no_base)
331 struct elf_phdr *elf_phdata;
332 struct elf_phdr *eppnt;
333 unsigned long load_addr = 0;
334 int load_addr_set = 0;
335 unsigned long last_bss = 0, elf_bss = 0;
336 unsigned long error = ~0UL;
337 unsigned long total_size;
340 /* First of all, some simple consistency checks */
341 if (interp_elf_ex->e_type != ET_EXEC &&
342 interp_elf_ex->e_type != ET_DYN)
344 if (!elf_check_arch(interp_elf_ex))
346 if (!interpreter->f_op || !interpreter->f_op->mmap)
350 * If the size of this structure has changed, then punt, since
351 * we will be doing the wrong thing.
353 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
355 if (interp_elf_ex->e_phnum < 1 ||
356 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
359 /* Now read in all of the header information */
361 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
362 if (size > ELF_MIN_ALIGN)
364 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
368 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
370 if (retval != size) {
376 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
381 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
382 if (eppnt->p_type == PT_LOAD) {
383 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
385 unsigned long vaddr = 0;
386 unsigned long k, map_addr;
388 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
389 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
390 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
391 vaddr = eppnt->p_vaddr;
392 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
393 elf_type |= MAP_FIXED;
394 else if (no_base && interp_elf_ex->e_type == ET_DYN)
397 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
400 if (BAD_ADDR(map_addr))
403 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
404 load_addr = map_addr - ELF_PAGESTART(vaddr);
409 * Check to see if the section's size will overflow the
410 * allowed task size. Note that p_filesz must always be
411 * <= p_memsize so it is only necessary to check p_memsz.
413 k = load_addr + eppnt->p_vaddr;
414 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
415 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
421 * Find the end of the file mapping for this phdr, and keep
422 * track of the largest address we see for this.
424 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
429 * Do the same thing for the memory mapping - between
430 * elf_bss and last_bss is the bss section.
432 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
439 * Now fill out the bss section. First pad the last page up
440 * to the page boundary, and then perform a mmap to make sure
441 * that there are zero-mapped pages up to and including the
445 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
447 /* Map the last of the bss segment */
448 if (last_bss > elf_bss) {
449 error = do_brk(elf_bss, last_bss - elf_bss);
454 *interp_load_addr = load_addr;
455 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/*
 * load_aout_interp - legacy path: load an a.out-format dynamic linker by
 * reading its text+data into a brk region. Returns the entry point, or
 * ~0UL on failure.
 * NOTE(review): switch braces, default case, goto labels and the final
 * return are elided in this excerpt.
 */
463 static unsigned long load_aout_interp(struct exec * interp_ex,
464 struct file * interpreter)
466 unsigned long text_data, elf_entry = ~0UL;
470 current->mm->end_code = interp_ex->a_text;
471 text_data = interp_ex->a_text + interp_ex->a_data;
472 current->mm->end_data = text_data;
473 current->mm->brk = interp_ex->a_bss + text_data;
475 switch (N_MAGIC(*interp_ex)) {
478 addr = (char __user *)0;
482 offset = N_TXTOFF(*interp_ex);
483 addr = (char __user *) N_TXTADDR(*interp_ex);
489 do_brk(0, text_data);
490 if (!interpreter->f_op || !interpreter->f_op->read)
492 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
494 flush_icache_range((unsigned long)addr,
495 (unsigned long)addr + text_data);
497 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
499 elf_entry = interp_ex->a_entry;
506 * These are the functions used to load ELF style executables and shared
507 * libraries. There is no binary dependent code anywhere else.
/* Which kind of program interpreter the binary requested (bitmask-style). */
510 #define INTERPRETER_NONE 0
511 #define INTERPRETER_AOUT 1
512 #define INTERPRETER_ELF 2
/*
 * load_elf_binary - the main exec handler for ELF executables. Validates
 * the headers, reads the program headers, locates and loads a PT_INTERP
 * interpreter (ELF or a.out), flushes the old image, maps all PT_LOAD
 * segments, sets up bss/brk and the argument/auxv stack, then starts the
 * new thread. Returns 0 on success or a negative errno.
 * FIX(review): "&current" had been corrupted to the HTML entity "&curren;"
 * ("¤t") in the MMAP_PAGE_ZERO mmap_sem lock calls; restored below.
 * NOTE(review): this excerpt elides many lines of the original (error-path
 * labels, braces, several declarations); inline NOTEs below mark points
 * where the control flow visibly depends on elided code.
 */
515 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
517 struct file *interpreter = NULL; /* to shut gcc up */
518 unsigned long load_addr = 0, load_bias = 0;
519 int load_addr_set = 0;
520 char * elf_interpreter = NULL;
521 unsigned int interpreter_type = INTERPRETER_NONE;
522 unsigned char ibcs2_interpreter = 0;
524 struct elf_phdr * elf_ppnt, *elf_phdata;
525 unsigned long elf_bss, elf_brk;
529 unsigned long elf_entry, interp_load_addr = 0;
530 unsigned long start_code, end_code, start_data, end_data;
531 unsigned long reloc_func_desc = 0;
532 char passed_fileno[6];
533 struct files_struct *files;
534 int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
535 unsigned long def_flags = 0;
537 struct elfhdr elf_ex;
538 struct elfhdr interp_elf_ex;
539 struct exec interp_ex;
542 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
548 /* Get the exec-header */
549 loc->elf_ex = *((struct elfhdr *) bprm->buf);
552 /* First of all, some simple consistency checks */
553 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
556 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
558 if (!elf_check_arch(&loc->elf_ex))
560 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
563 /* Now read in all of the header information */
565 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
567 if (loc->elf_ex.e_phnum < 1 ||
568 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
570 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
572 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
576 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
577 if (retval != size) {
583 files = current->files; /* Refcounted so ok */
584 retval = unshare_files();
587 if (files == current->files) {
588 put_files_struct(files);
592 /* exec will make our files private anyway, but for the a.out
593 loader stuff we need to do it earlier */
595 retval = get_unused_fd();
598 get_file(bprm->file);
599 fd_install(elf_exec_fileno = retval, bprm->file);
601 elf_ppnt = elf_phdata;
/* Scan the program headers for a PT_INTERP entry naming the interpreter. */
610 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
611 if (elf_ppnt->p_type == PT_INTERP) {
612 /* This is the program interpreter used for
613 * shared libraries - for now assume that this
614 * is an a.out format binary
618 if (elf_ppnt->p_filesz > PATH_MAX ||
619 elf_ppnt->p_filesz < 2)
623 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
625 if (!elf_interpreter)
628 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
631 if (retval != elf_ppnt->p_filesz) {
634 goto out_free_interp;
636 /* make sure path is NULL terminated */
638 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
639 goto out_free_interp;
641 /* If the program interpreter is one of these two,
642 * then assume an iBCS2 image. Otherwise assume
643 * a native linux image.
645 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
646 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
647 ibcs2_interpreter = 1;
650 * The early SET_PERSONALITY here is so that the lookup
651 * for the interpreter happens in the namespace of the
652 * to-be-execed image. SET_PERSONALITY can select an
655 * However, SET_PERSONALITY is NOT allowed to switch
656 * this task into the new images's memory mapping
657 * policy - that is, TASK_SIZE must still evaluate to
658 * that which is appropriate to the execing application.
659 * This is because exit_mmap() needs to have TASK_SIZE
660 * evaluate to the size of the old image.
662 * So if (say) a 64-bit application is execing a 32-bit
663 * application it is the architecture's responsibility
664 * to defer changing the value of TASK_SIZE until the
665 * switch really is going to happen - do this in
666 * flush_thread(). - akpm
668 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
670 interpreter = open_exec(elf_interpreter);
671 retval = PTR_ERR(interpreter);
672 if (IS_ERR(interpreter))
673 goto out_free_interp;
674 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
675 if (retval != BINPRM_BUF_SIZE) {
678 goto out_free_dentry;
681 /* Get the exec headers */
682 loc->interp_ex = *((struct exec *) bprm->buf);
683 loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
689 elf_ppnt = elf_phdata;
690 executable_stack = EXSTACK_DEFAULT;
/* PT_GNU_STACK tells us whether the stack should be executable. */
692 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
693 if (elf_ppnt->p_type == PT_GNU_STACK) {
694 if (elf_ppnt->p_flags & PF_X)
695 executable_stack = EXSTACK_ENABLE_X;
697 executable_stack = EXSTACK_DISABLE_X;
700 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
/* exec-shield policy: possibly force a non-exec stack / relocated exec. */
704 if (current->personality == PER_LINUX)
705 switch (exec_shield) {
707 if (executable_stack == EXSTACK_DISABLE_X) {
708 current->flags |= PF_RELOCEXEC;
709 relocexec = PF_RELOCEXEC;
714 executable_stack = EXSTACK_DISABLE_X;
715 current->flags |= PF_RELOCEXEC;
716 relocexec = PF_RELOCEXEC;
720 /* Some simple consistency checks for the interpreter */
721 if (elf_interpreter) {
722 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
724 /* Now figure out which format our binary is */
725 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
726 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
727 (N_MAGIC(loc->interp_ex) != QMAGIC))
728 interpreter_type = INTERPRETER_ELF;
730 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
731 interpreter_type &= ~INTERPRETER_ELF;
734 if (!interpreter_type)
735 goto out_free_dentry;
737 /* Make sure only one type was selected */
738 if ((interpreter_type & INTERPRETER_ELF) &&
739 interpreter_type != INTERPRETER_ELF) {
740 // FIXME - ratelimit this before re-enabling
741 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
742 interpreter_type = INTERPRETER_ELF;
744 /* Verify the interpreter has a valid arch */
745 if ((interpreter_type == INTERPRETER_ELF) &&
746 !elf_check_arch(&loc->interp_elf_ex))
747 goto out_free_dentry;
749 /* Executables without an interpreter also need a personality */
750 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
753 /* OK, we are done with that, now set up the arg stuff,
754 and then start this sucker up */
756 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
757 char *passed_p = passed_fileno;
758 sprintf(passed_fileno, "%d", elf_exec_fileno);
760 if (elf_interpreter) {
761 retval = copy_strings_kernel(1, &passed_p, bprm);
763 goto out_free_dentry;
768 /* Flush all traces of the currently running executable */
769 retval = flush_old_exec(bprm);
771 goto out_free_dentry;
772 current->flags |= relocexec;
776 * Turn off the CS limit completely if exec-shield disabled or
779 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X)
780 arch_add_exec_range(current->mm, -1);
783 /* Discard our unneeded old files struct */
786 put_files_struct(files);
790 /* OK, This is the point of no return */
791 current->mm->start_data = 0;
792 current->mm->end_data = 0;
793 current->mm->end_code = 0;
794 current->mm->mmap = NULL;
795 current->flags &= ~PF_FORKNOEXEC;
796 current->mm->def_flags = def_flags;
798 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
799 may depend on the personality. */
800 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
801 if (elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
802 current->personality |= READ_IMPLIES_EXEC;
804 arch_pick_mmap_layout(current->mm);
806 /* Do this so that we can load the interpreter, if need be. We will
807 change some of these later */
808 // current->mm->rss = 0;
809 vx_rsspages_sub(current->mm, current->mm->rss);
810 current->mm->free_area_cache = current->mm->mmap_base;
811 retval = setup_arg_pages(bprm, executable_stack);
813 send_sig(SIGKILL, current, 0);
814 goto out_free_dentry;
817 current->mm->start_stack = bprm->p;
820 /* Now we do a little grungy work by mmaping the ELF image into
821 the correct location in memory.
824 for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
825 int elf_prot = 0, elf_flags;
826 unsigned long k, vaddr;
828 if (elf_ppnt->p_type != PT_LOAD)
831 if (unlikely (elf_brk > elf_bss)) {
834 /* There was a PT_LOAD segment with p_memsz > p_filesz
835 before this one. Map anonymous pages, if needed,
836 and clear the area. */
837 retval = set_brk (elf_bss + load_bias,
838 elf_brk + load_bias);
840 send_sig(SIGKILL, current, 0);
841 goto out_free_dentry;
843 nbyte = ELF_PAGEOFFSET(elf_bss);
845 nbyte = ELF_MIN_ALIGN - nbyte;
846 if (nbyte > elf_brk - elf_bss)
847 nbyte = elf_brk - elf_bss;
848 clear_user((void __user *) elf_bss + load_bias, nbyte);
852 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
853 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
854 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
856 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
858 vaddr = elf_ppnt->p_vaddr;
859 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
860 elf_flags |= MAP_FIXED;
861 else if (loc->elf_ex.e_type == ET_DYN)
865 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
868 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
869 if (BAD_ADDR(error)) {
870 send_sig(SIGKILL, current, 0);
871 goto out_free_dentry;
874 if (!load_addr_set) {
876 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
877 if (loc->elf_ex.e_type == ET_DYN) {
879 ELF_PAGESTART(load_bias + vaddr);
880 load_addr += load_bias;
881 reloc_func_desc = load_bias;
884 k = elf_ppnt->p_vaddr;
885 if (k < start_code) start_code = k;
886 if (start_data < k) start_data = k;
889 * Check to see if the section's size will overflow the
890 * allowed task size. Note that p_filesz must always be
891 * <= p_memsz so it is only necessary to check p_memsz.
893 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
894 elf_ppnt->p_memsz > TASK_SIZE ||
895 TASK_SIZE - elf_ppnt->p_memsz < k) {
896 /* set_brk can never work. Avoid overflows. */
897 send_sig(SIGKILL, current, 0);
898 goto out_free_dentry;
901 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
905 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
909 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* Relocate all bookkeeping addresses by the load bias chosen above. */
914 loc->elf_ex.e_entry += load_bias;
915 elf_bss += load_bias;
916 elf_brk += load_bias;
917 start_code += load_bias;
918 end_code += load_bias;
919 start_data += load_bias;
920 end_data += load_bias;
922 /* Calling set_brk effectively mmaps the pages that we need
923 * for the bss and break sections. We must do this before
924 * mapping in the interpreter, to make sure it doesn't wind
925 * up getting placed where the bss needs to go.
927 retval = set_brk(elf_bss, elf_brk);
929 send_sig(SIGKILL, current, 0);
930 goto out_free_dentry;
934 if (elf_interpreter) {
935 if (interpreter_type == INTERPRETER_AOUT)
936 elf_entry = load_aout_interp(&loc->interp_ex,
939 elf_entry = load_elf_interp(&loc->interp_elf_ex,
943 if (BAD_ADDR(elf_entry)) {
944 printk(KERN_ERR "Unable to load interpreter %.128s\n",
946 force_sig(SIGSEGV, current);
947 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
948 goto out_free_dentry;
950 reloc_func_desc = interp_load_addr;
952 allow_write_access(interpreter);
954 kfree(elf_interpreter);
956 elf_entry = loc->elf_ex.e_entry;
961 if (interpreter_type != INTERPRETER_AOUT)
962 sys_close(elf_exec_fileno);
964 set_binfmt(&elf_format);
967 * Map the vsyscall trampoline. This address is then passed via
970 #ifdef __HAVE_ARCH_VSYSCALL
975 current->flags &= ~PF_FORKNOEXEC;
976 create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
977 load_addr, interp_load_addr);
978 /* N.B. passed_fileno might not be initialized? */
979 if (interpreter_type == INTERPRETER_AOUT)
980 current->mm->arg_start += strlen(passed_fileno) + 1;
981 current->mm->end_code = end_code;
982 current->mm->start_code = start_code;
983 current->mm->start_data = start_data;
984 current->mm->end_data = end_data;
985 current->mm->start_stack = bprm->p;
987 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
988 if (current->flags & PF_RELOCEXEC)
989 randomize_brk(elf_brk);
991 if (current->personality & MMAP_PAGE_ZERO) {
992 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
993 and some applications "depend" upon this behavior.
994 Since we do not have the power to recompile these, we
995 emulate the SVr4 behavior. Sigh. */
996 down_write(&current->mm->mmap_sem);
997 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
998 MAP_FIXED | MAP_PRIVATE, 0);
999 up_write(&current->mm->mmap_sem);
1002 #ifdef ELF_PLAT_INIT
1004 * The ABI may specify that certain registers be set up in special
1005 * ways (on i386 %edx is the address of a DT_FINI function, for
1006 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1007 * that the e_entry field is the address of the function descriptor
1008 * for the startup routine, rather than the address of the startup
1009 * routine itself. This macro performs whatever initialization to
1010 * the regs structure is required as well as any relocations to the
1011 * function descriptor entries when executing dynamically links apps.
1013 ELF_PLAT_INIT(regs, reloc_func_desc);
1016 start_thread(regs, elf_entry, bprm->p);
1017 if (unlikely(current->ptrace & PT_PTRACED)) {
1018 if (current->ptrace & PT_TRACE_EXEC)
1019 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1021 send_sig(SIGTRAP, current, 0);
/* Error unwinding: release the interpreter, fd, and files struct. */
1031 allow_write_access(interpreter);
1035 if (elf_interpreter)
1036 kfree(elf_interpreter);
1038 sys_close(elf_exec_fileno);
1041 put_files_struct(current->files);
1042 current->files = files;
1046 current->flags &= ~PF_RELOCEXEC;
1047 current->flags |= old_relocexec;
1051 /* This is really simpleminded and specialized - we are loading an
1052 a.out library that is given an ELF header. */
/*
 * load_elf_library - back-end for the legacy uselib() syscall: map a simple
 * ET_EXEC shared library (at most 2 phdrs, exactly one PT_LOAD) at its
 * linked address and extend brk over its bss. Returns 0 on success or a
 * negative errno.
 * FIX(review): "&current" had been corrupted to the HTML entity "&curren;"
 * ("¤t") in the mmap_sem lock calls below; restored to "&current".
 * NOTE(review): braces, error labels and several checks are elided in this
 * excerpt.
 */
1054 static int load_elf_library(struct file *file)
1056 struct elf_phdr *elf_phdata;
1057 unsigned long elf_bss, bss, len;
1058 int retval, error, i, j;
1059 struct elfhdr elf_ex;
1062 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1063 if (retval != sizeof(elf_ex))
1066 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1069 /* First of all, some simple consistency checks */
1070 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1071 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1074 /* Now read in all of the header information */
1076 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1077 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1080 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1085 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1089 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1090 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1094 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1096 /* Now use mmap to map the library into memory. */
1097 down_write(&current->mm->mmap_sem);
1098 error = do_mmap(file,
1099 ELF_PAGESTART(elf_phdata->p_vaddr),
1100 (elf_phdata->p_filesz +
1101 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1102 PROT_READ | PROT_WRITE | PROT_EXEC,
1103 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1104 (elf_phdata->p_offset -
1105 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1106 up_write(&current->mm->mmap_sem);
1107 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1110 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1113 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1114 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1116 do_brk(len, bss - len);
1126 * Note that some platforms still use traditional core dumps and not
1127 * the ELF core dump. Each platform can select it as appropriate.
1129 #ifdef USE_ELF_CORE_DUMP
1134 * Modelled on fs/exec.c:aout_core_dump()
1135 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1138 * These are the only things you should do on a core-file: use only these
1139 * functions to write out all the necessary info.
/* Write nr bytes of the core file; returns nonzero iff fully written. */
1141 static int dump_write(struct file *file, const void *addr, int nr)
1143 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
/*
 * Seek the core file to absolute offset off, via llseek when available.
 * NOTE(review): the non-llseek fallback and return paths are elided here.
 */
1146 static int dump_seek(struct file *file, off_t off)
1148 if (file->f_op->llseek) {
1149 if (file->f_op->llseek(file, off, 0) != off)
1157 * Decide whether a segment is worth dumping; default is yes to be
1158 * sure (missing info is worse than too much; etc).
1159 * Personally I'd include everything, and use the coredump limit...
1161 * I think we should skip something. But I am not sure how. H.J.
/*
 * maydump - decide whether a VMA should be written to the core file.
 * NOTE(review): the return statements are elided in this excerpt.
 */
1163 static int maydump(struct vm_area_struct *vma)
1165 /* Do not dump I/O mapped devices, shared memory, or special mappings */
1166 if (vma->vm_flags & (VM_IO | VM_SHARED | VM_RESERVED))
1169 /* If it hasn't been written to, don't write it out */
/* Round x up to a multiple of y (y > 0). */
1176 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1178 /* An ELF note in memory */
1183 unsigned int datasz;
/* Size in bytes of an ELF note record: header + padded name + padded data. */
1187 static int notesize(struct memelfnote *en)
1191 sz = sizeof(struct elf_note);
1192 sz += roundup(strlen(en->name) + 1, 4);
1193 sz += roundup(en->datasz, 4);
/* Write-or-bail helpers used only by writenote(); return 0 on failure. */
1198 #define DUMP_WRITE(addr, nr) \
1199 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1200 #define DUMP_SEEK(off) \
1201 do { if (!dump_seek(file, (off))) return 0; } while(0)
/*
 * writenote - emit one ELF note (header, name, 4-byte-aligned data) into
 * the core file. Returns 0 on write failure.
 */
1203 static int writenote(struct memelfnote *men, struct file *file)
1207 en.n_namesz = strlen(men->name) + 1;
1208 en.n_descsz = men->datasz;
1209 en.n_type = men->type;
1211 DUMP_WRITE(&en, sizeof(en));
1212 DUMP_WRITE(men->name, en.n_namesz);
1213 /* XXX - cast from long long to long to avoid need for libgcc.a */
1214 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1215 DUMP_WRITE(men->data, men->datasz);
1216 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
/*
 * Redefinitions used by the core dumper proper: these versions also track
 * the running size against the coredump rlimit ("size"/"limit" are locals
 * of the elided elf_core_dump()).
 */
1223 #define DUMP_WRITE(addr, nr) \
1224 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1226 #define DUMP_SEEK(off) \
1227 if (!dump_seek(file, (off))) \
/* Fill in an ET_CORE ELF header describing `segs` program headers. */
1230 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1232 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1233 elf->e_ident[EI_CLASS] = ELF_CLASS;
1234 elf->e_ident[EI_DATA] = ELF_DATA;
1235 elf->e_ident[EI_VERSION] = EV_CURRENT;
1236 elf->e_ident[EI_OSABI] = ELF_OSABI;
1237 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1239 elf->e_type = ET_CORE;
1240 elf->e_machine = ELF_ARCH;
1241 elf->e_version = EV_CURRENT;
1243 elf->e_phoff = sizeof(struct elfhdr);
1246 elf->e_ehsize = sizeof(struct elfhdr);
1247 elf->e_phentsize = sizeof(struct elf_phdr);
1248 elf->e_phnum = segs;
1249 elf->e_shentsize = 0;
1251 elf->e_shstrndx = 0;
/* Fill in the PT_NOTE program header covering sz bytes at file offset. */
1255 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1257 phdr->p_type = PT_NOTE;
1258 phdr->p_offset = offset;
1261 phdr->p_filesz = sz;
/*
 * fill_note - populate a memelfnote descriptor (name/type/size/data).
 * NOTE(review): the body is elided in this excerpt.
 */
1268 static void fill_note(struct memelfnote *note, const char *name, int type,
1269 unsigned int sz, void *data)
1279 * fill up all the fields in prstatus from the given task struct, except registers
1280 * which need to be filled up separately.
/*
 * fill_prstatus - populate the NT_PRSTATUS note (signal info, ids, times)
 * from task p; registers are filled in separately by the caller.
 * NOTE(review): braces and the if/else structure around the group-leader
 * time accounting are partially elided in this excerpt.
 */
1282 static void fill_prstatus(struct elf_prstatus *prstatus,
1283 struct task_struct *p, long signr)
1285 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1286 prstatus->pr_sigpend = p->pending.signal.sig[0];
1287 prstatus->pr_sighold = p->blocked.sig[0];
1288 prstatus->pr_pid = p->pid;
1289 prstatus->pr_ppid = p->parent->pid;
1290 prstatus->pr_pgrp = process_group(p);
1291 prstatus->pr_sid = p->signal->session;
1292 if (p->pid == p->tgid) {
1294 * This is the record for the group leader. Add in the
1295 * cumulative times of previous dead threads. This total
1296 * won't include the time of each live thread whose state
1297 * is included in the core dump. The final total reported
1298 * to our parent process when it calls wait4 will include
1299 * those sums as well as the little bit more time it takes
1300 * this and each other thread to finish dying after the
1301 * core dump synchronization phase.
1303 jiffies_to_timeval(p->utime + p->signal->utime,
1304 &prstatus->pr_utime);
1305 jiffies_to_timeval(p->stime + p->signal->stime,
1306 &prstatus->pr_stime);
1308 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1309 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1311 jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1312 jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1315 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1316 struct mm_struct *mm)
1320 /* first copy the parameters from user space */
1321 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1323 len = mm->arg_end - mm->arg_start;
1324 if (len >= ELF_PRARGSZ)
1325 len = ELF_PRARGSZ-1;
1326 copy_from_user(&psinfo->pr_psargs,
1327 (const char __user *)mm->arg_start, len);
1328 for(i = 0; i < len; i++)
1329 if (psinfo->pr_psargs[i] == 0)
1330 psinfo->pr_psargs[i] = ' ';
1331 psinfo->pr_psargs[len] = 0;
1333 psinfo->pr_pid = p->pid;
1334 psinfo->pr_ppid = p->parent->pid;
1335 psinfo->pr_pgrp = process_group(p);
1336 psinfo->pr_sid = p->signal->session;
1338 i = p->state ? ffz(~p->state) + 1 : 0;
1339 psinfo->pr_state = i;
1340 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1341 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1342 psinfo->pr_nice = task_nice(p);
1343 psinfo->pr_flag = p->flags;
1344 SET_UID(psinfo->pr_uid, p->uid);
1345 SET_GID(psinfo->pr_gid, p->gid);
1346 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1351 /* Here is the structure in which status of each thread is captured. */
1352 struct elf_thread_status
1354 struct list_head list;
1355 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1356 elf_fpregset_t fpu; /* NT_PRFPREG */
1357 struct task_struct *thread;
1358 #ifdef ELF_CORE_COPY_XFPREGS
1359 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1361 struct memelfnote notes[3];
1366 * In order to add the specific thread information for the elf file format,
1367 * we need to keep a linked list of every threads pr_status and then
1368 * create a single section for them in the final core file.
1370 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1373 struct task_struct *p = t->thread;
1376 fill_prstatus(&t->prstatus, p, signr);
1377 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1379 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1381 sz += notesize(&t->notes[0]);
1383 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1384 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1386 sz += notesize(&t->notes[1]);
1389 #ifdef ELF_CORE_COPY_XFPREGS
1390 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1391 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1393 sz += notesize(&t->notes[2]);
/*
 * Actual dumper.
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we stop writing (DUMP_WRITE checks the running size against `limit`).
 *
 * NOTE(review): this copy of the function is missing a number of lines —
 * the allocation-failure gotos, the do_each_thread() opener, several
 * closing braces, the end_coredump/cleanup labels and the final return.
 * Documented as-is below; compare against a pristine fs/binfmt_elf.c
 * before building.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	/* RLIMIT_CORE bounds the number of bytes written to the core file */
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);		/* elf_thread_status records */
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
	int thread_status_size = 0;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);

	/* Collect a status record for every other thread sharing our mm. */
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
			if (current->mm == p->mm && current != p) {
				/* GFP_ATOMIC: tasklist_lock is held here */
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
					read_unlock(&tasklist_lock);
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				list_add(&tmp->list, &thread_list);
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;

			tmp = list_entry(t, struct elf_thread_status, list);
			/* fills tmp->notes[] and returns their on-disk size */
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;

	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;

	fill_elf_header(elf, segs+1);	/* including notes section */

	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	auxv = (elf_addr_t *) current->mm->saved_auxv;
	/* walk saved_auxv two words at a time up to the AT_NULL terminator */
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
		struct elf_phdr phdr;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);
		sz += thread_status_size;
		fill_elf_note_phdr(&phdr, sz, offset);
		DUMP_WRITE(&phdr, sizeof(phdr));

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		/* maydump() decides whether the segment data is written or elided */
		phdr.p_filesz = maydump(vma) ? sz : 0;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))

	/* Dump the data pages of each VMA, page by page. */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr += PAGE_SIZE) {
			/*
			 * NOTE(review): this inner `vma` shadows the outer
			 * loop variable; get_user_pages() rewrites it below.
			 */
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &vma) <= 0) {
				/* unmapped page: dump it as a hole */
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			if (page == ZERO_PAGE(addr)) {
				/* shared zero page: also dump as a hole */
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			flush_cache_page(vma, addr);
			if ((size += PAGE_SIZE) > limit ||
			    !dump_write(file, kaddr,
				page_cache_release(page);
			page_cache_release(page);

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;

	/* Sanity check: everything we promised in the phdrs got written. */
	if ((off_t) file->f_pos != offset) {
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);

	/* Free the per-thread status records collected above. */
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		kfree(list_entry(tmp, struct elf_thread_status, list));

#ifdef ELF_CORE_COPY_XFPREGS
#endif /* USE_ELF_CORE_DUMP */
1677 static int __init init_elf_binfmt(void)
1679 return register_binfmt(&elf_format);
1682 static void __exit exit_elf_binfmt(void)
1684 /* Remove the COFF and ELF loaders. */
1685 unregister_binfmt(&elf_format);
/* Module registration boilerplate. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");