/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 *  Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vs_memory.h>

#include <asm/uaccess.h>
#include <asm/param.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#define elf_addr_t unsigned long
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
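/*
 * For illustration only -- a user-space sketch of the three rounding macros
 * above with a hypothetical 4096-byte minimum alignment.  Guarded out of
 * the kernel build; compile separately to experiment.
 */
#if 0
#include <assert.h>

#define MIN_ALIGN 4096UL				/* stand-in for ELF_MIN_ALIGN */
#define PAGESTART(v)  ((v) & ~(MIN_ALIGN-1))		/* round down to page */
#define PAGEOFFSET(v) ((v) & (MIN_ALIGN-1))		/* offset within page */
#define PAGEALIGN(v)  (((v) + MIN_ALIGN - 1) & ~(MIN_ALIGN - 1)) /* round up */

int main(void)
{
	unsigned long vaddr = 0x08048123UL;		/* hypothetical p_vaddr */

	assert(PAGESTART(vaddr)  == 0x08048000UL);
	assert(PAGEOFFSET(vaddr) == 0x123UL);
	assert(PAGEALIGN(vaddr)  == 0x08049000UL);
	return 0;
}
#endif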
static struct linux_binfmt elf_format = {
		.module		= THIS_MODULE,
		.load_binary	= load_elf_binary,
		.load_shlib	= load_elf_library,
		.core_dump	= elf_core_dump,
		.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr = do_brk(start, end - start);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory */

static void padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		clear_user((void __user *) elf_bss, nbyte);
	}
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
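/*
 * For illustration only -- how the grows-down variants above behave for a
 * hypothetical stack pointer (user-space sketch, guarded out of the build):
 * STACK_ALLOC moves sp down and yields the new, lower address, and
 * STACK_ROUND leaves room for 'items' words while keeping 16-byte alignment.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long sp = 0xbffffff3UL;	/* hypothetical bprm->p */
	unsigned long len = 5;			/* e.g. strlen("i686") + 1 */
	unsigned long items = 7;		/* argc slot plus pointers */
	unsigned long p;

	sp -= len;				/* STACK_ALLOC(sp, len) */
	assert(sp == 0xbfffffeeUL);

	/* STACK_ROUND(sp, items) on a word-sized table */
	p = (sp - items * sizeof(unsigned long)) & ~15UL;
	assert((p & 15UL) == 0);
	assert(p <= sp - items * sizeof(unsigned long));
	return 0;
}
#endif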
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */

	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

#ifdef __HAVE_ARCH_ALIGN_STACK
		p = (unsigned long)arch_align_stack((unsigned long)p);
#endif
		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		__copy_to_user(u_platform, k_platform, len);
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
			sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, argv);
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, envp);
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
	return 0;
}
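/*
 * For illustration only -- a user-space walk over the table that
 * create_elf_tables() just laid out: envp[] ends with a NULL, and the
 * auxiliary vector follows immediately, terminated by AT_NULL.  A 32-bit
 * glibc environment is assumed; guarded out of the kernel build.
 */
#if 0
#include <stdio.h>
#include <elf.h>

extern char **environ;

int main(void)
{
	char **p = environ;
	Elf32_auxv_t *aux;

	while (*p)				/* skip past envp[] */
		p++;
	for (aux = (Elf32_auxv_t *)(p + 1);	/* auxv starts after the NULL */
	     aux->a_type != AT_NULL; aux++)
		printf("auxv type %2lu val %#lx\n",
		       (unsigned long)aux->a_type,
		       (unsigned long)aux->a_un.a_val);
	return 0;
}
#endif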
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type,
			unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	down_write(&current->mm->mmap_sem);

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);

	return map_addr;
}

#endif /* !elf_map */
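/*
 * For illustration only -- the same map-then-trim idea in user space:
 * reserve the whole image span with a single mmap so nothing can be placed
 * inside it, then give the tail back, keeping only the first segment
 * (hypothetical sizes; guarded out of the build).
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t total_size = 16 * 4096;		/* full span of all segments */
	size_t size = 4 * 4096;			/* first segment only */
	void *base = mmap(NULL, total_size, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (base == MAP_FAILED)
		return 1;
	/* keep [base, base+size); unmap the remainder of the reservation */
	munmap((char *)base + size, total_size - size);
	printf("first segment reserved at %p\n", base);
	return 0;
}
#endif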
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}

	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
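/*
 * For illustration only -- the same first-to-last PT_LOAD span computed in
 * user space over an Elf32_Phdr array (hypothetical values; guarded out of
 * the build).
 */
#if 0
#include <stdio.h>
#include <elf.h>

static unsigned long span(const Elf32_Phdr *cmds, int nr, unsigned long align)
{
	int i, first = -1, last = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {
			last = i;
			if (first == -1)
				first = i;
		}
	if (first == -1)
		return 0;
	/* end of last segment minus page-rounded start of first segment */
	return cmds[last].p_vaddr + cmds[last].p_memsz -
		(cmds[first].p_vaddr & ~(align - 1));
}

int main(void)
{
	Elf32_Phdr ph[2] = {
		{ .p_type = PT_LOAD, .p_vaddr = 0x1123, .p_memsz = 0x2000 },
		{ .p_type = PT_LOAD, .p_vaddr = 0x8000, .p_memsz = 0x1000 },
	};

	printf("mapping span: %#lx\n", span(ph, 2, 4096));	/* 0x8000 */
	return 0;
}
#endif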
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr,
				     unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size)
		goto out_close;
	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		error = do_brk(elf_bss, last_bss - elf_bss);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec * interp_ex,
				      struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	do_brk(0, text_data);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	int i;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, size;
	unsigned long error;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);
	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files;		/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz == 0)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	executable_stack = EXSTACK_DEFAULT;

	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);

	relocexec = 0;

	if (current->personality == PER_LINUX)
	switch (exec_shield) {
	case 1:
		if (executable_stack == EXSTACK_DISABLE_X) {
			current->flags |= PF_RELOCEXEC;
			relocexec = PF_RELOCEXEC;
		}
		break;

	case 2:
		executable_stack = EXSTACK_DISABLE_X;
		current->flags |= PF_RELOCEXEC;
		relocexec = PF_RELOCEXEC;
		break;
	}
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;
	current->flags |= relocexec;

	/*
	 * Turn off the CS limit completely if exec-shield disabled or
	 * NX active:
	 */
	if (!exec_shield || executable_stack != EXSTACK_DISABLE_X)
		arch_add_exec_range(current->mm, -1);

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;
	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
		current->personality |= READ_IMPLIES_EXEC;

	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	// current->mm->rss = 0;
	vx_rsspages_sub(current->mm, current->mm->rss);
	current->mm->free_area_cache = current->mm->mmap_base;
	retval = setup_arg_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void __user *) elf_bss + load_bias, nbyte);
			}
		}
		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
			elf_flags |= MAP_FIXED;
		else if (loc->elf_ex.e_type == ET_DYN)
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;
		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	padzero(elf_bss);
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr,
						    load_bias);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);
	/*
	 * Map the vsyscall trampoline. This address is then passed via
	 * AT_SYSINFO.
	 */
#ifdef __HAVE_ARCH_VSYSCALL
	map_vsyscall();
#endif

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef __HAVE_ARCH_RANDOMIZE_BRK
	if (current->flags & PF_RELOCEXEC)
		randomize_brk(elf_brk);
#endif
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	current->flags &= ~PF_RELOCEXEC;
	current->flags |= old_relocexec;
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD) j++;
	if (j != 1)
		goto out_free_ph;
	while (elf_phdata->p_type != PT_LOAD) elf_phdata++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(elf_phdata->p_vaddr),
			(elf_phdata->p_filesz +
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(elf_phdata->p_offset -
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
		goto out_free_ph;

	elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
	padzero(elf_bss);

	len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
	if (bss > len)
		do_brk(len, bss - len);
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, off_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;

	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;

	return 1;
}
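/*
 * For illustration only -- approximating maydump()'s decision from user
 * space via the permission column of /proc/self/maps (a heuristic: the
 * kernel tests vm_flags directly; guarded out of the build).
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[512], perms[5];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%*x-%*x %4s", perms) != 1)
			continue;
		/* unreadable VMAs are never dumped; writable ones always are */
		printf("%s  %s", perms[0] != 'r' ? "skip" :
		       perms[1] == 'w' ? "dump" : "maybe", line);
	}
	fclose(f);
	return 0;
}
#endif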
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK
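/*
 * For illustration only -- the on-disk layout writenote() produces: an
 * Elf32_Nhdr, the name including its NUL, padding to a 4-byte boundary,
 * then the descriptor, padded again (user-space sketch, guarded out of the
 * build; NT_PRSTATUS taken from <elf.h>).
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <elf.h>

#define ROUND4(x) (((x) + 3) & ~3U)

static size_t emit_note(unsigned char *buf, const char *name,
			unsigned int type, const void *desc, unsigned int descsz)
{
	Elf32_Nhdr nhdr = {
		.n_namesz = strlen(name) + 1,
		.n_descsz = descsz,
		.n_type   = type,
	};
	size_t off = 0;

	memcpy(buf + off, &nhdr, sizeof(nhdr));
	off += sizeof(nhdr);
	memcpy(buf + off, name, nhdr.n_namesz);
	off = ROUND4(off + nhdr.n_namesz);	/* pad name to 4 bytes */
	memcpy(buf + off, desc, descsz);
	off = ROUND4(off + descsz);		/* pad descriptor to 4 bytes */
	return off;				/* equals notesize() above */
}

int main(void)
{
	unsigned char buf[64];
	int dummy = 42;

	memset(buf, 0, sizeof(buf));		/* pad bytes must read as zero */
	printf("note record: %zu bytes\n",
	       emit_note(buf, "CORE", NT_PRSTATUS, &dummy, sizeof(dummy)));
	return 0;
}
#endif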
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = 0;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
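/*
 * For illustration only -- reading back the header fields set above from
 * an existing 32-bit core file (the "core" path is hypothetical; guarded
 * out of the build).
 */
#if 0
#include <stdio.h>
#include <elf.h>

int main(void)
{
	Elf32_Ehdr ehdr;
	FILE *f = fopen("core", "rb");		/* hypothetical core file */

	if (!f || fread(&ehdr, sizeof(ehdr), 1, f) != 1)
		return 1;
	/* e_type should be ET_CORE; the section header fields should be zero */
	printf("e_type=%u (ET_CORE=%u) e_phnum=%u e_shnum=%u e_shoff=%u\n",
	       ehdr.e_type, ET_CORE, ehdr.e_phnum, ehdr.e_shnum,
	       (unsigned int)ehdr.e_shoff);
	fclose(f);
	return 0;
}
#endif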
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (p->pid == p->tgid) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		jiffies_to_timeval(p->utime + p->signal->utime,
				   &prstatus->pr_utime);
		jiffies_to_timeval(p->stime + p->signal->stime,
				   &prstatus->pr_stime);
	} else {
		jiffies_to_timeval(p->utime, &prstatus->pr_utime);
		jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	}
	jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
			struct mm_struct *mm)
{
	int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	copy_from_user(&psinfo->pr_psargs,
		       (const char __user *)mm->arg_start, len);
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);
	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);
	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif
	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");