 * linux/fs/binfmt_elf.c
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>
#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
#define elf_addr_t unsigned long
 * If we don't support core dumping, then supply a NULL so we
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#define elf_core_dump NULL
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
# define ELF_MIN_ALIGN PAGE_SIZE
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
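/*
 * Illustrative example (not from the original source): with
 * ELF_MIN_ALIGN == 4096 (0x1000), a segment at p_vaddr 0x08048234 gives
 *   ELF_PAGESTART(0x08048234)  == 0x08048000
 *   ELF_PAGEOFFSET(0x08048234) == 0x234
 *   ELF_PAGEALIGN(0x08048234)  == 0x08049000
 * i.e. the start is rounded down to a page boundary, the offset is the
 * remainder within that page, and the align rounds up to the next
 * ELF_MIN_ALIGN boundary.
 */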
static struct linux_binfmt elf_format = {
        .module = THIS_MODULE,
        .load_binary = load_elf_binary,
        .load_shlib = load_elf_library,
        .core_dump = elf_core_dump,
        .min_coredump = ELF_EXEC_PAGESIZE
#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        down_write(&current->mm->mmap_sem);
        addr = do_brk(start, end - start);
        up_write(&current->mm->mmap_sem);
        current->mm->start_brk = current->mm->brk = end;
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss). This would
   contain the junk from the file that should not
static int padzero(unsigned long elf_bss)
        nbyte = ELF_PAGEOFFSET(elf_bss);
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (clear_user((void __user *) elf_bss, nbyte))
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
        ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
        (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
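/*
 * Illustrative example for the grows-down case (assumed values, not from
 * the original source): with p == 0x7fff1000, STACK_ALLOC(p, 13) moves p
 * down to 0x7fff0ff3 and yields that address. Later, if sp is an
 * elf_addr_t __user * whose value is 0x7fff0ff3 and elf_addr_t is 8 bytes,
 * STACK_ROUND(sp, 5) computes (0x7fff0ff3 - 5*8) & ~15UL == 0x7fff0fc0,
 * i.e. it reserves room for 5 pointer-sized slots and aligns the result
 * down to a 16-byte boundary.
 */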
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
                int interp_aout, unsigned long load_addr,
                unsigned long interp_load_addr)
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t __user *argv;
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
        const char *k_platform = ELF_PLATFORM;
        elf_addr_t *elf_info;
        struct task_struct *tsk = current;
         * If this architecture has a platform capability string, copy it
         * to userspace. In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
                size_t len = strlen(k_platform) + 1;
                 * In some cases (e.g. Hyper-Threading), we want to avoid L1
                 * evictions by the processes running on the same package. One
                 * thing we can do is to shuffle the initial stack for them.
                p = arch_align_stack(p);
                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_platform, k_platform, len))
        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
        do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
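/*
 * Illustrative sketch (not from the original source): each NEW_AUX_ENT
 * call appends one two-word auxiliary-vector entry, so after
 *   NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
 * elf_info holds ..., AT_PAGESZ, 4096, ... (assuming a 4 KiB page size).
 * The vector is terminated by an AT_NULL entry when the remainder of
 * saved_auxv is cleared below.
 */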
         * ARCH_DLINFO must come first so PPC can do its special alignment of
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
        NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
        NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
        NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
        NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
                NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
                sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
        /* And advance past the AT_NULL entry. */
        sp = STACK_ADD(p, ei_index);
        items = (argc + 1) + (envc + 1);
                items += 3; /* a.out interpreters require argv & envp too */
                items += 1; /* ELF interpreters only put argc on the stack */
        bprm->p = STACK_ROUND(sp, items);
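/*
 * For reference (sketch, not part of the original source): in the plain
 * ELF case the block being sized here is laid out, from the final stack
 * pointer upwards, as
 *   argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL | auxv pairs
 * with the auxiliary vector terminated by an AT_NULL entry; 'items'
 * counts the argc/argv/envp slots and ei_index counts the auxv words
 * copied out below.
 */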
        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t __user *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
        sp = (elf_addr_t __user *)bprm->p;
        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                envp = argv + argc + 1;
                __put_user((elf_addr_t)(unsigned long)argv, sp++);
                __put_user((elf_addr_t)(unsigned long)envp, sp++);
                envp = argv + argc + 1;
        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
                __put_user((elf_addr_t)p, argv++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
        if (__put_user(0, argv))
        current->mm->arg_end = current->mm->env_start = p;
                __put_user((elf_addr_t)p, envp++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
        if (__put_user(0, envp))
        current->mm->env_end = p;
        /* Put the elf_info on the stack in the right place. */
        sp = (elf_addr_t __user *)envp + 1;
        if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type,
                unsigned long total_size)
        unsigned long map_addr;
        unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(size);
        down_write(&current->mm->mmap_sem);
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image. (since size < total_size)
         * So we first map the 'big' image - and unmap the remainder at
         * the end. (which unmap is needed for ELF images with holes.)
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = do_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        do_munmap(current->mm, map_addr+size, total_size-size);
                map_addr = do_mmap(filep, addr, size, prot, type, off);
        up_write(&current->mm->mmap_sem);
#endif /* !elf_map */
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
        int i, first_idx = -1, last_idx = -1;
        for (i = 0; i < nr; i++)
                if (cmds[i].p_type == PT_LOAD) {
        return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
                ELF_PAGESTART(cmds[first_idx].p_vaddr);
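/*
 * Illustrative example (assumed values, not from the original source):
 * for an interpreter with PT_LOAD segments at p_vaddr 0x0 (p_memsz
 * 0x1e000) and p_vaddr 0x1fb00 (p_memsz 0x1400), and ELF_MIN_ALIGN ==
 * 4096, this returns 0x1fb00 + 0x1400 - 0x0 == 0x20f00: the span from
 * the page start of the first PT_LOAD to the end of the last one, which
 * is what elf_map() above uses for its first, full-size mmap.
 */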
/* This is much more generalized than the library routine read function,
   so we keep this separate. Technically the library read function
   is only provided so that we can read a.out libraries that have
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                struct file * interpreter,
                unsigned long *interp_load_addr,
                unsigned long no_base)
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        unsigned long total_size;
        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
        if (!elf_check_arch(interp_elf_ex))
        if (!interpreter->f_op || !interpreter->f_op->mmap)
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
        if (interp_elf_ex->e_phnum < 1 ||
            interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
        /* Now read in all of the header information */
        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
        elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
        retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);
        if (retval != size) {
        total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;
                        if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;
                        else if (no_base && interp_elf_ex->e_type == ET_DYN)
                        map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
                        if (BAD_ADDR(map_addr))
                        if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                         * Check to see if the section's size will overflow the
                         * allowed task size. Note that p_filesz must always be
                         * <= p_memsz so it is only necessary to check p_memsz.
                        k = load_addr + eppnt->p_vaddr;
                        if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
                            eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
                         * Find the end of the file mapping for this phdr, and keep
                         * track of the largest address we see for this.
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
         * Now fill out the bss section. First pad the last page up
         * to the page boundary, and then perform a mmap to make sure
         * that there are zero-mapped pages up to and including the
        if (padzero(elf_bss)) {
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
        /* Map the last of the bss segment */
        if (last_bss > elf_bss) {
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
        *interp_load_addr = load_addr;
        error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
static unsigned long load_aout_interp(struct exec * interp_ex,
                struct file * interpreter)
        unsigned long text_data, elf_entry = ~0UL;
        current->mm->end_code = interp_ex->a_text;
        text_data = interp_ex->a_text + interp_ex->a_data;
        current->mm->end_data = text_data;
        current->mm->brk = interp_ex->a_bss + text_data;
        switch (N_MAGIC(*interp_ex)) {
                addr = (char __user *)0;
                offset = N_TXTOFF(*interp_ex);
                addr = (char __user *) N_TXTADDR(*interp_ex);
        down_write(&current->mm->mmap_sem);
        do_brk(0, text_data);
        up_write(&current->mm->mmap_sem);
        if (!interpreter->f_op || !interpreter->f_op->read)
        if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + text_data);
        down_write(&current->mm->mmap_sem);
        do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
        up_write(&current->mm->mmap_sem);
        elf_entry = interp_ex->a_entry;
 * These are the functions used to load ELF style executables and shared
 * libraries. There is no binary dependent code anywhere else.
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static unsigned long randomize_stack_top(unsigned long stack_top)
        unsigned int random_variable = 0;
        if (current->flags & PF_RANDOMIZE)
                random_variable = get_random_int() % (8*1024*1024);
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top + random_variable);
        return PAGE_ALIGN(stack_top - random_variable);
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
        unsigned int interpreter_type = INTERPRETER_NONE;
        unsigned char ibcs2_interpreter = 0;
        struct elf_phdr * elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        unsigned long elf_entry, interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        char passed_fileno[6];
        struct files_struct *files;
        int have_pt_gnu_stack, executable_stack;
        unsigned long def_flags = 0;
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
                struct exec interp_ex;
        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        /* Get the exec-header */
        loc->elf_ex = *((struct elfhdr *) bprm->buf);
        /* First of all, some simple consistency checks */
        if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
        if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
        if (!elf_check_arch(&loc->elf_ex))
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
        /* Now read in all of the header information */
        if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
        if (loc->elf_ex.e_phnum < 1 ||
            loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
        size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
        elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
        retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
        if (retval != size) {
        files = current->files; /* Refcounted so ok */
        retval = unshare_files();
        if (files == current->files) {
                put_files_struct(files);
        /* exec will make our files private anyway, but for the a.out
           loader stuff we need to do it earlier */
        retval = get_unused_fd();
        get_file(bprm->file);
        fd_install(elf_exec_fileno = retval, bprm->file);
        elf_ppnt = elf_phdata;
        for (i = 0; i < loc->elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                        if (elf_ppnt->p_filesz > PATH_MAX ||
                            elf_ppnt->p_filesz < 2)
                        elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
                        if (!elf_interpreter)
                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                        if (retval != elf_ppnt->p_filesz) {
                                goto out_free_interp;
                        /* make sure path is NULL terminated */
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;
                        /* If the program interpreter is one of these two,
                         * then assume an iBCS2 image. Otherwise assume
                         * a native linux image.
                        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                            strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
                                ibcs2_interpreter = 1;
                         * The early SET_PERSONALITY here is so that the lookup
                         * for the interpreter happens in the namespace of the
                         * to-be-execed image. SET_PERSONALITY can select an
                         * However, SET_PERSONALITY is NOT allowed to switch
                         * this task into the new image's memory mapping
                         * policy - that is, TASK_SIZE must still evaluate to
                         * that which is appropriate to the execing application.
                         * This is because exit_mmap() needs to have TASK_SIZE
                         * evaluate to the size of the old image.
                         * So if (say) a 64-bit application is execing a 32-bit
                         * application it is the architecture's responsibility
                         * to defer changing the value of TASK_SIZE until the
                         * switch really is going to happen - do this in
                         * flush_thread(). - akpm
                        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;
                        retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
                        if (retval != BINPRM_BUF_SIZE) {
                                goto out_free_dentry;
                        /* Get the exec headers */
                        loc->interp_ex = *((struct exec *) bprm->buf);
                        loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
        elf_ppnt = elf_phdata;
        executable_stack = EXSTACK_DEFAULT;
        for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
                if (elf_ppnt->p_type == PT_GNU_STACK) {
                        if (elf_ppnt->p_flags & PF_X)
                                executable_stack = EXSTACK_ENABLE_X;
                                executable_stack = EXSTACK_DISABLE_X;
        have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
        if (current->personality == PER_LINUX && exec_shield == 2) {
                executable_stack = EXSTACK_DISABLE_X;
                current->flags |= PF_RANDOMIZE;
        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
                /* Now figure out which format our binary is */
                if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != QMAGIC))
                        interpreter_type = INTERPRETER_ELF;
                if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        interpreter_type &= ~INTERPRETER_ELF;
                if (!interpreter_type)
                        goto out_free_dentry;
                /* Make sure only one type was selected */
                if ((interpreter_type & INTERPRETER_ELF) &&
                    interpreter_type != INTERPRETER_ELF) {
                        // FIXME - ratelimit this before re-enabling
                        // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
                        interpreter_type = INTERPRETER_ELF;
                /* Verify the interpreter has a valid arch */
                if ((interpreter_type == INTERPRETER_ELF) &&
                    !elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
                /* Executables without an interpreter also need a personality */
                SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        /* OK, we are done with that, now set up the arg stuff,
           and then start this sucker up */
        if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
                char *passed_p = passed_fileno;
                sprintf(passed_fileno, "%d", elf_exec_fileno);
                if (elf_interpreter) {
                        retval = copy_strings_kernel(1, &passed_p, bprm);
                                goto out_free_dentry;
        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
                goto out_free_dentry;
         * Turn off the CS limit completely if exec-shield disabled or
        if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
                arch_add_exec_range(current->mm, -1);
        /* Discard our unneeded old files struct */
                put_files_struct(files);
        /* OK, This is the point of no return */
        current->mm->start_data = 0;
        current->mm->end_data = 0;
        current->mm->end_code = 0;
        current->mm->mmap = NULL;
        current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;
        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality. */
        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        if (exec_shield != 2 &&
            elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
        arch_pick_mmap_layout(current->mm);
        /* Do this so that we can load the interpreter, if need be. We will
           change some of these later */
        set_mm_counter(current->mm, rss, 0);
        current->mm->free_area_cache = current->mm->mmap_base;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        current->mm->start_stack = bprm->p;
        /* Now we do a little grungy work by mmaping the ELF image into
           the correct location in memory.
        for (i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;
                if (elf_ppnt->p_type != PT_LOAD)
                if (unlikely (elf_brk > elf_bss)) {
                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area. */
                        retval = set_brk (elf_bss + load_bias,
                                          elf_brk + load_bias);
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                if (clear_user((void __user *)elf_bss +
                                         * This bss-zeroing can fail if the ELF
                                         * file specifies odd protections. So
                                         * we don't check the return value
                if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
                if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
                if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
                elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
                vaddr = elf_ppnt->p_vaddr;
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
                        elf_flags |= MAP_FIXED;
                else if (loc->elf_ex.e_type == ET_DYN)
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
                if (BAD_ADDR(error)) {
                        send_sig(SIGKILL, current, 0);
                        goto out_free_dentry;
                if (!load_addr_set) {
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (loc->elf_ex.e_type == ET_DYN) {
                                        ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                k = elf_ppnt->p_vaddr;
                if (k < start_code) start_code = k;
                if (start_data < k) start_data = k;
                 * Check to see if the section's size will overflow the
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        send_sig(SIGKILL, current, 0);
                        goto out_free_dentry;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        loc->elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;
        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections. We must do this before
         * mapping in the interpreter, to make sure it doesn't wind
         * up getting placed where the bss needs to go.
        retval = set_brk(elf_bss, elf_brk);
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        if (padzero(elf_bss)) {
                send_sig(SIGSEGV, current, 0);
                retval = -EFAULT; /* Nobody gets to see this, but.. */
                goto out_free_dentry;
        if (elf_interpreter) {
                if (interpreter_type == INTERPRETER_AOUT)
                        elf_entry = load_aout_interp(&loc->interp_ex,
                        elf_entry = load_elf_interp(&loc->interp_elf_ex,
                if (BAD_ADDR(elf_entry)) {
                        printk(KERN_ERR "Unable to load interpreter %.128s\n",
                        force_sig(SIGSEGV, current);
                        retval = -ENOEXEC; /* Nobody gets to see this, but.. */
                        goto out_free_dentry;
                reloc_func_desc = interp_load_addr;
                allow_write_access(interpreter);
                kfree(elf_interpreter);
                elf_entry = loc->elf_ex.e_entry;
        if (interpreter_type != INTERPRETER_AOUT)
                sys_close(elf_exec_fileno);
        set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
        retval = arch_setup_additional_pages(bprm, executable_stack);
                send_sig(SIGKILL, current, 0);
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
        compute_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
                        load_addr, interp_load_addr);
        /* N.B. passed_fileno might not be initialized? */
        if (interpreter_type == INTERPRETER_AOUT)
                current->mm->arg_start += strlen(passed_fileno) + 1;
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;
#ifdef __HAVE_ARCH_RANDOMIZE_BRK
        if (current->flags & PF_RANDOMIZE)
                randomize_brk(elf_brk);
        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
#ifdef ELF_PLAT_INIT
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example. In addition, it may also specify (eg, PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself. This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
        ELF_PLAT_INIT(regs, reloc_func_desc);
        start_thread(regs, elf_entry, bprm->p);
        if (unlikely(current->ptrace & PT_PTRACED)) {
                if (current->ptrace & PT_TRACE_EXEC)
                        ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
                        send_sig(SIGTRAP, current, 0);
        allow_write_access(interpreter);
        if (elf_interpreter)
                kfree(elf_interpreter);
        sys_close(elf_exec_fileno);
                put_files_struct(current->files);
                current->files = files;
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;
        retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
        /* Now read in all of the header information */
        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
        elf_phdata = kmalloc(j, GFP_KERNEL);
        retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
        for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
                if ((eppnt + i)->p_type == PT_LOAD)
        while (eppnt->p_type != PT_LOAD)
        /* Now use mmap to map the library into memory. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
        up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
        if (padzero(elf_bss)) {
        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
        down_write(&current->mm->mmap_sem);
        do_brk(len, bss - len);
        up_write(&current->mm->mmap_sem);
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump. Each platform can select it as appropriate.
#ifdef USE_ELF_CORE_DUMP
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
static int dump_write(struct file *file, const void *addr, int nr)
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
static int dump_seek(struct file *file, loff_t off)
        if (file->f_op->llseek) {
                if (file->f_op->llseek(file, off, 0) != off)
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 * I think we should skip something. But I am not sure how. H.J.
static int maydump(struct vm_area_struct *vma)
        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
        if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
        /* Dump shared memory only if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED)
                return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
        /* If it hasn't been written to, don't write it out */
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
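/*
 * Illustrative example (not from the original source): roundup(5, 4) ==
 * ((5 + 3) / 4) * 4 == 8, while roundup(8, 4) == 8, so note names and
 * descriptors below get padded out to the next 4-byte boundary.
 */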
/* An ELF note in memory */
        unsigned int datasz;
static int notesize(struct memelfnote *en)
        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);
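/*
 * Illustrative example (assumed sizes, not from the original source):
 * for a "CORE"/NT_PRSTATUS note whose descriptor happens to be 144 bytes
 * on the target, this is sizeof(struct elf_note) + roundup(5, 4) +
 * roundup(144, 4) == 12 + 8 + 144 == 164 bytes in the note segment.
 */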
#define DUMP_WRITE(addr, nr) \
        do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off) \
        do { if (!dump_seek(file, (off))) return 0; } while(0)
static int writenote(struct memelfnote *men, struct file *file)
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;
        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);
        /* XXX - cast from long long to long to avoid need for libgcc.a */
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
        DUMP_WRITE(men->data, men->datasz);
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
#define DUMP_WRITE(addr, nr) \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
#define DUMP_SEEK(off) \
        if (!dump_seek(file, (off))) \
static inline void fill_elf_header(struct elfhdr *elf, int segs)
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_flags = ELF_CORE_EFLAGS;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
        elf->e_shentsize = 0;
        elf->e_shstrndx = 0;
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_filesz = sz;
static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
 * fill up all the fields in prstatus from the given task struct, except registers
 * which need to be filled up separately.
static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        prstatus->pr_pid = p->pid;
        prstatus->pr_ppid = p->parent->pid;
        prstatus->pr_pgrp = process_group(p);
        prstatus->pr_sid = p->signal->session;
        if (thread_group_leader(p)) {
                 * This is the record for the group leader. Add in the
                 * cumulative times of previous dead threads. This total
                 * won't include the time of each live thread whose state
                 * is included in the core dump. The final total reported
                 * to our parent process when it calls wait4 will include
                 * those sums as well as the little bit more time it takes
                 * this and each other thread to finish dying after the
                 * core dump synchronization phase.
                cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
                                   &prstatus->pr_utime);
                cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
                                   &prstatus->pr_stime);
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                struct mm_struct *mm)
        unsigned int i, len;
        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));
        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        if (copy_from_user(&psinfo->pr_psargs,
                           (const char __user *)mm->arg_start, len))
        for (i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;
        psinfo->pr_pid = p->pid;
        psinfo->pr_ppid = p->parent->pid;
        psinfo->pr_pgrp = process_group(p);
        psinfo->pr_sid = p->signal->session;
        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        SET_UID(psinfo->pr_uid, p->uid);
        SET_GID(psinfo->pr_gid, p->gid);
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
        struct list_head list;
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        elf_fpregset_t fpu;             /* NT_PRFPREG */
        struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t xfpu;           /* NT_PRXFPREG */
        struct memelfnote notes[3];
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
        struct task_struct *p = t->thread;
        fill_prstatus(&t->prstatus, p, signr);
        elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
        fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
        sz += notesize(&t->notes[0]);
        if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
                fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
                sz += notesize(&t->notes[1]);
#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
                fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
                sz += notesize(&t->notes[2]);
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
        struct vm_area_struct *vma;
        struct elfhdr *elf = NULL;
        off_t offset = 0, dataoff;
        unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
        struct memelfnote *notes = NULL;
        struct elf_prstatus *prstatus = NULL;   /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo = NULL;     /* NT_PRPSINFO */
        struct task_struct *g, *p;
        LIST_HEAD(thread_list);
        struct list_head *t;
        elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t *xfpu = NULL;
        int thread_status_size = 0;
         * We no longer stop all VM operations.
         * This is because those processes that could possibly change map_count or
         * the mmap / vma pages are now blocked in do_exit on current finishing
         * Only ptrace can touch these memory addresses, but it doesn't change
         * the map_count or the pages allocated. So no possibility of crashing
         * exists while dumping the mm->vm_next areas to the core file.
        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
        notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
        fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
#ifdef ELF_CORE_COPY_XFPREGS
        xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
                struct elf_thread_status *tmp;
                read_lock(&tasklist_lock);
                        if (current->mm == p->mm && current != p) {
                                tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
                                        read_unlock(&tasklist_lock);
                                memset(tmp, 0, sizeof(*tmp));
                                INIT_LIST_HEAD(&tmp->list);
                                list_add(&tmp->list, &thread_list);
                while_each_thread(g,p);
                read_unlock(&tasklist_lock);
                list_for_each(t, &thread_list) {
                        struct elf_thread_status *tmp;
                        tmp = list_entry(t, struct elf_thread_status, list);
                        sz = elf_dump_thread_status(signr, tmp);
                        thread_status_size += sz;
        /* now collect the dump for the current */
        memset(prstatus, 0, sizeof(*prstatus));
        fill_prstatus(prstatus, current, signr);
        elf_core_copy_regs(&prstatus->pr_reg, regs);
        segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
        fill_elf_header(elf, segs+1);   /* including notes section */
        current->flags |= PF_DUMPCORE;
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
        fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
        fill_psinfo(psinfo, current->group_leader, current->mm);
        fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
        fill_note(notes + 2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
        auxv = (elf_addr_t *) current->mm->saved_auxv;
        while (auxv[i - 2] != AT_NULL);
        fill_note(&notes[numnote++], "CORE", NT_AUXV,
                  i * sizeof (elf_addr_t), auxv);
        /* Try to dump the FPU. */
        if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
                fill_note(notes + numnote++,
                          "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(current, xfpu))
                fill_note(notes + numnote++,
                          "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
        offset += (segs+1) * sizeof(struct elf_phdr);   /* Program headers */
        /* Write notes phdr entry */
                struct elf_phdr phdr;
                for (i = 0; i < numnote; i++)
                        sz += notesize(notes + i);
                sz += thread_status_size;
                fill_elf_note_phdr(&phdr, sz, offset);
                DUMP_WRITE(&phdr, sizeof(phdr));
        /* Page-align dumped data */
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
        /* Write program headers for segments dump */
        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                struct elf_phdr phdr;
                sz = vma->vm_end - vma->vm_start;
                phdr.p_type = PT_LOAD;
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_filesz = maydump(vma) ? sz : 0;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
                if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
                DUMP_WRITE(&phdr, sizeof(phdr));
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
        ELF_CORE_WRITE_EXTRA_PHDRS;
        /* write out the notes section */
        for (i = 0; i < numnote; i++)
                if (!writenote(notes + i, file))
        /* write out the thread status notes section */
        list_for_each(t, &thread_list) {
                struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
                for (i = 0; i < tmp->num_notes; i++)
                        if (!writenote(&tmp->notes[i], file))
        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                for (addr = vma->vm_start;
                     addr += PAGE_SIZE) {
                        struct vm_area_struct *vma;
                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                                           &page, &vma) <= 0) {
                                DUMP_SEEK(file->f_pos + PAGE_SIZE);
                                if (page == ZERO_PAGE(addr)) {
                                        DUMP_SEEK(file->f_pos + PAGE_SIZE);
                                        flush_cache_page(vma, addr, page_to_pfn(page));
                                        if ((size += PAGE_SIZE) > limit ||
                                            !dump_write(file, kaddr,
                                                page_cache_release(page);
                                page_cache_release(page);
#ifdef ELF_CORE_WRITE_EXTRA_DATA
        ELF_CORE_WRITE_EXTRA_DATA;
        if ((off_t) file->f_pos != offset) {
                printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
                       (off_t) file->f_pos, offset);
        while (!list_empty(&thread_list)) {
                struct list_head *tmp = thread_list.next;
                kfree(list_entry(tmp, struct elf_thread_status, list));
#ifdef ELF_CORE_COPY_XFPREGS
#endif /* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
        return register_binfmt(&elf_format);
static void __exit exit_elf_binfmt(void)
        /* Remove the COFF and ELF loaders. */
        unregister_binfmt(&elf_format);
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");