/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/random.h>
41 #include <linux/elf.h>
42 #include <linux/vs_memory.h>
43 #include <linux/vs_cvirt.h>
44 #include <asm/uaccess.h>
45 #include <asm/param.h>
/* Entry points this binfmt registers with the core exec machinery. */
48 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
49 static int load_elf_library(struct file *);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Auxiliary-vector entries are pushed to the user stack as native longs. */
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
62 static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
/* No core-dump support configured: register a NULL handler instead. */
64 #define elf_core_dump NULL
/* ELF segment granularity: at least one page, or the arch's larger ELF page. */
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 #define ELF_MIN_ALIGN PAGE_SIZE
73 #ifndef ELF_CORE_EFLAGS
74 #define ELF_CORE_EFLAGS 0
/* Round an address down to / get offset within / round up to ELF_MIN_ALIGN. */
77 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
78 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
79 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/* The binfmt descriptor registered via set_binfmt()/register_binfmt(). */
81 static struct linux_binfmt elf_format = {
82 .module = THIS_MODULE,
83 .load_binary = load_elf_binary,
84 .load_shlib = load_elf_library,
85 .core_dump = elf_core_dump,
86 .min_coredump = ELF_EXEC_PAGESIZE
/*
 * NOTE(review): upstream later changed BAD_ADDR() to compare against
 * TASK_SIZE; ">= PAGE_MASK" accepts some addresses above the user/kernel
 * boundary on some configurations -- confirm against your tree.
 */
89 #define BAD_ADDR(x) ((unsigned long)(x) >= PAGE_MASK)
91 static int set_brk(unsigned long start, unsigned long end)
93 start = ELF_PAGEALIGN(start);
94 end = ELF_PAGEALIGN(end);
97 down_write(¤t->mm->mmap_sem);
98 addr = do_brk(start, end - start);
99 up_write(¤t->mm->mmap_sem);
103 current->mm->start_brk = current->mm->brk = end;
107 /* We need to explicitly zero any fractional pages
108 after the data section (i.e. bss). This would
109 contain the junk from the file that should not
112 static int padzero(unsigned long elf_bss)
116 nbyte = ELF_PAGEOFFSET(elf_bss);
118 nbyte = ELF_MIN_ALIGN - nbyte;
119 if (clear_user((void __user *) elf_bss, nbyte))
125 /* Let's use some macros to make this stack manipulation a little clearer */
126 #ifdef CONFIG_STACK_GROWSUP
/* Upward-growing stack: allocations advance sp, rounding rounds UP to 16. */
127 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
128 #define STACK_ROUND(sp, items) \
129 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
130 #define STACK_ALLOC(sp, len) ({ \
131 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
/* Downward-growing stack (the common case): sp decreases, round DOWN to 16. */
134 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
135 #define STACK_ROUND(sp, items) \
136 (((unsigned long) (sp - items)) &~ 15UL)
137 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
/*
 * Lay out the initial user stack for a freshly exec'd ELF image:
 * platform string, ELF auxiliary vector, argc, argv[] and envp[]
 * pointer arrays.  Pointers point into the strings already copied by
 * the generic exec code; this only writes the index structures.
 *
 * NOTE(review): this listing is a lossy extraction -- the embedded
 * original line numbers are non-contiguous, so statements (braces,
 * returns, error checks) are missing below; the gaps are artifacts of
 * the extraction, not the code.  "¤t" is a mangled "&current".
 */
141 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
142 int interp_aout, unsigned long load_addr,
143 unsigned long interp_load_addr)
145 unsigned long p = bprm->p;
146 int argc = bprm->argc;
147 int envc = bprm->envc;
148 elf_addr_t __user *argv;
149 elf_addr_t __user *envp;
150 elf_addr_t __user *sp;
151 elf_addr_t __user *u_platform;
152 const char *k_platform = ELF_PLATFORM;
154 elf_addr_t *elf_info;
156 struct task_struct *tsk = current;
159 * If this architecture has a platform capability string, copy it
160 * to userspace. In some cases (Sparc), this info is impossible
161 * for userspace to get any other way, in others (i386) it is
166 size_t len = strlen(k_platform) + 1;
169 * In some cases (e.g. Hyper-Threading), we want to avoid L1
170 * evictions by the processes running on the same package. One
171 * thing we can do is to shuffle the initial stack for them.
174 p = arch_align_stack(p);
176 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
177 if (__copy_to_user(u_platform, k_platform, len))
/* Auxv is staged in mm->saved_auxv so /proc/pid/auxv can expose it. */
181 /* Create the ELF interpreter info */
182 elf_info = (elf_addr_t *)current->mm->saved_auxv;
183 #define NEW_AUX_ENT(id, val) \
185 elf_info[ei_index++] = id; \
186 elf_info[ei_index++] = val; \
191 * ARCH_DLINFO must come first so PPC can do its special alignment of
196 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
197 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
198 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
199 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
200 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
201 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
202 NEW_AUX_ENT(AT_BASE, interp_load_addr);
203 NEW_AUX_ENT(AT_FLAGS, 0);
204 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
205 NEW_AUX_ENT(AT_UID, tsk->uid);
206 NEW_AUX_ENT(AT_EUID, tsk->euid);
207 NEW_AUX_ENT(AT_GID, tsk->gid);
208 NEW_AUX_ENT(AT_EGID, tsk->egid);
209 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
211 NEW_AUX_ENT(AT_PLATFORM,
212 (elf_addr_t)(unsigned long)u_platform);
214 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
215 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
218 /* AT_NULL is zero; clear the rest too */
219 memset(&elf_info[ei_index], 0,
220 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
222 /* And advance past the AT_NULL entry. */
225 sp = STACK_ADD(p, ei_index);
/* argc + trailing NULLs of argv[]/envp[]; a.out interps also get argv/envp ptrs. */
227 items = (argc + 1) + (envc + 1);
229 items += 3; /* a.out interpreters require argv & envp too */
231 items += 1; /* ELF interpreters only put argc on the stack */
233 bprm->p = STACK_ROUND(sp, items);
235 /* Point sp at the lowest address on the stack */
236 #ifdef CONFIG_STACK_GROWSUP
237 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
238 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
240 sp = (elf_addr_t __user *)bprm->p;
243 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
244 if (__put_user(argc, sp++))
248 envp = argv + argc + 1;
249 __put_user((elf_addr_t)(unsigned long)argv, sp++);
250 __put_user((elf_addr_t)(unsigned long)envp, sp++);
253 envp = argv + argc + 1;
256 /* Populate argv and envp */
257 p = current->mm->arg_end = current->mm->arg_start;
260 __put_user((elf_addr_t)p, argv++);
261 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
262 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
266 if (__put_user(0, argv))
268 current->mm->arg_end = current->mm->env_start = p;
271 __put_user((elf_addr_t)p, envp++);
272 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
273 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
277 if (__put_user(0, envp))
279 current->mm->env_end = p;
281 /* Put the elf_info on the stack in the right place. */
282 sp = (elf_addr_t __user *)envp + 1;
283 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
/*
 * mmap one PT_LOAD segment of an ELF file at (approximately) addr.
 * When total_size != 0 (first mapping of an interpreter image) the full
 * image span is reserved first and the excess unmapped, so later
 * segments cannot collide with a randomized placement.
 *
 * NOTE(review): lossy extraction -- lines are missing (non-contiguous
 * embedded numbering) and "¤t" below is a mangled "&current".
 */
290 static unsigned long elf_map(struct file *filep, unsigned long addr,
291 struct elf_phdr *eppnt, int prot, int type,
292 unsigned long total_size)
294 unsigned long map_addr;
295 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
296 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
298 addr = ELF_PAGESTART(addr);
299 size = ELF_PAGEALIGN(size);
301 /* mmap() will return -EINVAL if given a zero size, but a
302 * segment with zero filesize is perfectly valid */
306 down_write(¤t->mm->mmap_sem);
308 * total_size is the size of the ELF (interpreter) image.
309 * The _first_ mmap needs to know the full size, otherwise
310 * randomization might put this image into an overlapping
311 * position with the ELF binary image. (since size < total_size)
312 * So we first map the 'big' image - and unmap the remainder at
313 * the end. (which unmap is needed for ELF images with holes.)
316 total_size = ELF_PAGEALIGN(total_size);
317 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
318 if (!BAD_ADDR(map_addr))
319 do_munmap(current->mm, map_addr+size, total_size-size);
321 map_addr = do_mmap(filep, addr, size, prot, type, off);
323 up_write(¤t->mm->mmap_sem);
327 #endif /* !elf_map */
329 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
331 int i, first_idx = -1, last_idx = -1;
333 for (i = 0; i < nr; i++)
334 if (cmds[i].p_type == PT_LOAD) {
343 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
344 ELF_PAGESTART(cmds[first_idx].p_vaddr);
348 /* This is much more generalized than the library routine read function,
349 so we keep this separate. Technically the library read function
350 is only provided so that we can read a.out libraries that have
/*
 * Map the ELF interpreter (dynamic linker) into the current mm and
 * return the load bias (relocation adjustment), or an error value that
 * BAD_ADDR()/IS_ERR() recognizes.  *interp_map_addr reports the first
 * mapping's address to the caller.
 *
 * NOTE(review): lossy extraction -- error paths, gotos and closing
 * braces are missing (embedded numbering is non-contiguous); "¤t"
 * is a mangled "&current".
 */
353 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
354 struct file *interpreter, unsigned long *interp_map_addr,
355 unsigned long no_base)
357 struct elf_phdr *elf_phdata;
358 struct elf_phdr *eppnt;
359 unsigned long load_addr = 0;
360 int load_addr_set = 0;
361 unsigned long last_bss = 0, elf_bss = 0;
362 unsigned long error = ~0UL;
363 unsigned long total_size;
366 /* First of all, some simple consistency checks */
367 if (interp_elf_ex->e_type != ET_EXEC &&
368 interp_elf_ex->e_type != ET_DYN)
370 if (!elf_check_arch(interp_elf_ex))
372 if (!interpreter->f_op || !interpreter->f_op->mmap)
376 * If the size of this structure has changed, then punt, since
377 * we will be doing the wrong thing.
379 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the phdr table fits in one sane allocation. */
381 if (interp_elf_ex->e_phnum < 1 ||
382 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
385 /* Now read in all of the header information */
386 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
387 if (size > ELF_MIN_ALIGN)
389 elf_phdata = kmalloc(size, GFP_KERNEL);
393 retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
394 (char *)elf_phdata,size);
396 if (retval != size) {
402 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
407 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
408 if (eppnt->p_type == PT_LOAD) {
409 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
411 unsigned long vaddr = 0;
412 unsigned long k, map_addr;
414 if (eppnt->p_flags & PF_R)
415 elf_prot = PROT_READ;
416 if (eppnt->p_flags & PF_W)
417 elf_prot |= PROT_WRITE;
418 if (eppnt->p_flags & PF_X)
419 elf_prot |= PROT_EXEC;
420 vaddr = eppnt->p_vaddr;
421 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
422 elf_type |= MAP_FIXED;
423 else if (no_base && interp_elf_ex->e_type == ET_DYN)
426 map_addr = elf_map(interpreter, load_addr + vaddr,
427 eppnt, elf_prot, elf_type, total_size);
429 if (!*interp_map_addr)
430 *interp_map_addr = map_addr;
432 if (BAD_ADDR(map_addr))
435 if (!load_addr_set &&
436 interp_elf_ex->e_type == ET_DYN) {
437 load_addr = map_addr - ELF_PAGESTART(vaddr);
442 * Check to see if the section's size will overflow the
443 * allowed task size. Note that p_filesz must always be
444 * <= p_memsize so it's only necessary to check p_memsz.
446 k = load_addr + eppnt->p_vaddr;
448 eppnt->p_filesz > eppnt->p_memsz ||
449 eppnt->p_memsz > TASK_SIZE ||
450 TASK_SIZE - eppnt->p_memsz < k) {
456 * Find the end of the file mapping for this phdr, and
457 * keep track of the largest address we see for this.
459 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
464 * Do the same thing for the memory mapping - between
465 * elf_bss and last_bss is the bss section.
467 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
474 * Now fill out the bss section. First pad the last page up
475 * to the page boundary, and then perform a mmap to make sure
476 * that there are zero-mapped pages up to and including the
479 if (padzero(elf_bss)) {
484 /* What we have mapped so far */
485 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
487 /* Map the last of the bss segment */
488 if (last_bss > elf_bss) {
489 down_write(¤t->mm->mmap_sem);
490 error = do_brk(elf_bss, last_bss - elf_bss);
491 up_write(¤t->mm->mmap_sem);
/*
 * Load an a.out-format interpreter (legacy iBCS2 support): reserve the
 * text+data region with do_brk(), read the image in with ->read(),
 * then reserve the bss.  Returns the interpreter entry point or ~0UL
 * on failure.
 *
 * NOTE(review): lossy extraction -- case labels, error returns and
 * braces are missing; "¤t" is a mangled "&current".
 */
504 static unsigned long load_aout_interp(struct exec *interp_ex,
505 struct file *interpreter)
507 unsigned long text_data, elf_entry = ~0UL;
511 current->mm->end_code = interp_ex->a_text;
512 text_data = interp_ex->a_text + interp_ex->a_data;
513 current->mm->end_data = text_data;
514 current->mm->brk = interp_ex->a_bss + text_data;
516 switch (N_MAGIC(*interp_ex)) {
519 addr = (char __user *)0;
523 offset = N_TXTOFF(*interp_ex);
524 addr = (char __user *)N_TXTADDR(*interp_ex);
530 down_write(¤t->mm->mmap_sem);
531 do_brk(0, text_data);
532 up_write(¤t->mm->mmap_sem);
533 if (!interpreter->f_op || !interpreter->f_op->read)
535 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
/* Freshly copied text must be visible to the instruction stream. */
537 flush_icache_range((unsigned long)addr,
538 (unsigned long)addr + text_data);
540 down_write(¤t->mm->mmap_sem);
541 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
543 up_write(¤t->mm->mmap_sem);
544 elf_entry = interp_ex->a_entry;
551 * These are the functions used to load ELF style executables and shared
552 * libraries. There is no binary dependent code anywhere else.
/* Which kind of PT_INTERP interpreter the binary requested. */
555 #define INTERPRETER_NONE 0
556 #define INTERPRETER_AOUT 1
557 #define INTERPRETER_ELF 2
/* Architectures may override the stack-randomization entropy mask. */
559 #ifndef STACK_RND_MASK
560 #define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
563 static unsigned long randomize_stack_top(unsigned long stack_top)
565 unsigned int random_variable = 0;
567 if (current->flags & PF_RANDOMIZE) {
568 random_variable = get_random_int() & STACK_RND_MASK;
569 random_variable <<= PAGE_SHIFT;
571 #ifdef CONFIG_STACK_GROWSUP
572 return PAGE_ALIGN(stack_top) + random_variable;
574 return PAGE_ALIGN(stack_top) - random_variable;
/*
 * The main ELF loader: validate the headers, find PT_INTERP, flush the
 * old image, map all PT_LOAD segments, set up brk/bss, load the
 * interpreter (ELF or legacy a.out), build the stack tables and start
 * the new thread.  This variant carries exec-shield and PT_GNU_STACK
 * handling.
 *
 * NOTE(review): this listing is a lossy extraction -- the embedded
 * original line numbers are non-contiguous, so error checks, gotos,
 * braces and whole statements are missing below.  The gaps are
 * artifacts of the extraction, not of the code; "¤t" is a mangled
 * "&current".  Verify against the tree's fs/binfmt_elf.c.
 */
578 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
580 struct file *interpreter = NULL; /* to shut gcc up */
581 unsigned long load_addr = 0, load_bias = 0;
582 int load_addr_set = 0;
583 char * elf_interpreter = NULL;
584 unsigned int interpreter_type = INTERPRETER_NONE;
585 unsigned char ibcs2_interpreter = 0;
587 struct elf_phdr *elf_ppnt, *elf_phdata;
588 unsigned long elf_bss, elf_brk;
592 unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
593 unsigned long start_code, end_code, start_data, end_data;
594 unsigned long reloc_func_desc = 0;
595 char passed_fileno[6];
596 struct files_struct *files;
597 int have_pt_gnu_stack, executable_stack;
598 unsigned long def_flags = 0;
600 struct elfhdr elf_ex;
601 struct elfhdr interp_elf_ex;
602 struct exec interp_ex;
605 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
611 /* Get the exec-header */
612 loc->elf_ex = *((struct elfhdr *)bprm->buf);
615 /* First of all, some simple consistency checks */
616 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
619 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
621 if (!elf_check_arch(&loc->elf_ex))
623 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
626 /* Now read in all of the header information */
627 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
629 if (loc->elf_ex.e_phnum < 1 ||
630 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
632 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
634 elf_phdata = kmalloc(size, GFP_KERNEL);
638 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
639 (char *)elf_phdata, size);
640 if (retval != size) {
/* Private files table early: the a.out loader path needs a usable fd. */
646 files = current->files; /* Refcounted so ok */
647 retval = unshare_files();
650 if (files == current->files) {
651 put_files_struct(files);
655 /* exec will make our files private anyway, but for the a.out
656 loader stuff we need to do it earlier */
657 retval = get_unused_fd();
660 get_file(bprm->file);
661 fd_install(elf_exec_fileno = retval, bprm->file);
663 elf_ppnt = elf_phdata;
/* Scan the program headers for PT_INTERP (the dynamic linker path). */
672 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
673 if (elf_ppnt->p_type == PT_INTERP) {
674 /* This is the program interpreter used for
675 * shared libraries - for now assume that this
676 * is an a.out format binary
679 if (elf_ppnt->p_filesz > PATH_MAX ||
680 elf_ppnt->p_filesz < 2)
684 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
686 if (!elf_interpreter)
689 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
692 if (retval != elf_ppnt->p_filesz) {
695 goto out_free_interp;
697 /* make sure path is NULL terminated */
699 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
700 goto out_free_interp;
702 /* If the program interpreter is one of these two,
703 * then assume an iBCS2 image. Otherwise assume
704 * a native linux image.
706 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
707 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
708 ibcs2_interpreter = 1;
711 * The early SET_PERSONALITY here is so that the lookup
712 * for the interpreter happens in the namespace of the
713 * to-be-execed image. SET_PERSONALITY can select an
716 * However, SET_PERSONALITY is NOT allowed to switch
717 * this task into the new images's memory mapping
718 * policy - that is, TASK_SIZE must still evaluate to
719 * that which is appropriate to the execing application.
720 * This is because exit_mmap() needs to have TASK_SIZE
721 * evaluate to the size of the old image.
723 * So if (say) a 64-bit application is execing a 32-bit
724 * application it is the architecture's responsibility
725 * to defer changing the value of TASK_SIZE until the
726 * switch really is going to happen - do this in
727 * flush_thread(). - akpm
729 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
731 interpreter = open_exec(elf_interpreter);
732 retval = PTR_ERR(interpreter);
733 if (IS_ERR(interpreter))
734 goto out_free_interp;
735 retval = kernel_read(interpreter, 0, bprm->buf,
737 if (retval != BINPRM_BUF_SIZE) {
740 goto out_free_dentry;
743 /* Get the exec headers */
744 loc->interp_ex = *((struct exec *)bprm->buf);
745 loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
/* Second pass: PT_GNU_STACK decides whether the stack is executable. */
751 elf_ppnt = elf_phdata;
752 executable_stack = EXSTACK_DEFAULT;
754 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
755 if (elf_ppnt->p_type == PT_GNU_STACK) {
756 if (elf_ppnt->p_flags & PF_X)
757 executable_stack = EXSTACK_ENABLE_X;
759 executable_stack = EXSTACK_DISABLE_X;
762 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
/* exec-shield policy: force non-exec stack and randomization. */
764 if (current->personality == PER_LINUX && (exec_shield & 2)) {
765 executable_stack = EXSTACK_DISABLE_X;
766 current->flags |= PF_RANDOMIZE;
769 /* Some simple consistency checks for the interpreter */
770 if (elf_interpreter) {
771 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
773 /* Now figure out which format our binary is */
774 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
775 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
776 (N_MAGIC(loc->interp_ex) != QMAGIC))
777 interpreter_type = INTERPRETER_ELF;
779 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
780 interpreter_type &= ~INTERPRETER_ELF;
783 if (!interpreter_type)
784 goto out_free_dentry;
786 /* Make sure only one type was selected */
787 if ((interpreter_type & INTERPRETER_ELF) &&
788 interpreter_type != INTERPRETER_ELF) {
789 // FIXME - ratelimit this before re-enabling
790 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
791 interpreter_type = INTERPRETER_ELF;
793 /* Verify the interpreter has a valid arch */
794 if ((interpreter_type == INTERPRETER_ELF) &&
795 !elf_check_arch(&loc->interp_elf_ex))
796 goto out_free_dentry;
798 /* Executables without an interpreter also need a personality */
799 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
802 /* OK, we are done with that, now set up the arg stuff,
803 and then start this sucker up */
804 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
805 char *passed_p = passed_fileno;
806 sprintf(passed_fileno, "%d", elf_exec_fileno);
808 if (elf_interpreter) {
809 retval = copy_strings_kernel(1, &passed_p, bprm);
811 goto out_free_dentry;
816 /* Flush all traces of the currently running executable */
817 retval = flush_old_exec(bprm);
819 goto out_free_dentry;
823 * Turn off the CS limit completely if exec-shield disabled or
826 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
827 arch_add_exec_range(current->mm, -1);
830 /* Discard our unneeded old files struct */
832 put_files_struct(files);
836 /* OK, This is the point of no return */
837 current->mm->start_data = 0;
838 current->mm->end_data = 0;
839 current->mm->end_code = 0;
840 current->mm->mmap = NULL;
841 current->flags &= ~PF_FORKNOEXEC;
842 current->mm->def_flags = def_flags;
844 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
845 may depend on the personality. */
846 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
847 if (!(exec_shield & 2) &&
848 elf_read_implies_exec(loc->elf_ex, executable_stack))
849 current->personality |= READ_IMPLIES_EXEC;
851 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
852 current->flags |= PF_RANDOMIZE;
853 arch_pick_mmap_layout(current->mm);
855 /* Do this so that we can load the interpreter, if need be. We will
856 change some of these later */
857 current->mm->free_area_cache = current->mm->mmap_base;
858 current->mm->cached_hole_size = 0;
859 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
862 send_sig(SIGKILL, current, 0);
863 goto out_free_dentry;
866 current->mm->start_stack = bprm->p;
868 /* Now we do a little grungy work by mmaping the ELF image into
869 the correct location in memory.
871 for(i = 0, elf_ppnt = elf_phdata;
872 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
873 int elf_prot = 0, elf_flags;
874 unsigned long k, vaddr;
876 if (elf_ppnt->p_type != PT_LOAD)
879 if (unlikely (elf_brk > elf_bss)) {
882 /* There was a PT_LOAD segment with p_memsz > p_filesz
883 before this one. Map anonymous pages, if needed,
884 and clear the area. */
885 retval = set_brk (elf_bss + load_bias,
886 elf_brk + load_bias);
888 send_sig(SIGKILL, current, 0);
889 goto out_free_dentry;
891 nbyte = ELF_PAGEOFFSET(elf_bss);
893 nbyte = ELF_MIN_ALIGN - nbyte;
894 if (nbyte > elf_brk - elf_bss)
895 nbyte = elf_brk - elf_bss;
896 if (clear_user((void __user *)elf_bss +
899 * This bss-zeroing can fail if the ELF
900 * file specifies odd protections. So
901 * we don't check the return value
907 if (elf_ppnt->p_flags & PF_R)
908 elf_prot |= PROT_READ;
909 if (elf_ppnt->p_flags & PF_W)
910 elf_prot |= PROT_WRITE;
911 if (elf_ppnt->p_flags & PF_X)
912 elf_prot |= PROT_EXEC;
914 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
916 vaddr = elf_ppnt->p_vaddr;
917 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
918 elf_flags |= MAP_FIXED;
919 else if (loc->elf_ex.e_type == ET_DYN)
/* ET_DYN main binary: bias everything by ELF_ET_DYN_BASE. */
923 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
926 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
927 elf_prot, elf_flags, 0);
928 if (BAD_ADDR(error)) {
929 send_sig(SIGKILL, current, 0);
930 goto out_free_dentry;
933 if (!load_addr_set) {
935 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
936 if (loc->elf_ex.e_type == ET_DYN) {
938 ELF_PAGESTART(load_bias + vaddr);
939 load_addr += load_bias;
940 reloc_func_desc = load_bias;
943 k = elf_ppnt->p_vaddr;
950 * Check to see if the section's size will overflow the
951 * allowed task size. Note that p_filesz must always be
952 * <= p_memsz so it is only necessary to check p_memsz.
954 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
955 elf_ppnt->p_memsz > TASK_SIZE ||
956 TASK_SIZE - elf_ppnt->p_memsz < k) {
957 /* set_brk can never work. Avoid overflows. */
958 send_sig(SIGKILL, current, 0);
959 goto out_free_dentry;
962 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
966 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
970 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* All segments mapped; translate image-relative bounds by load_bias. */
975 loc->elf_ex.e_entry += load_bias;
976 elf_bss += load_bias;
977 elf_brk += load_bias;
978 start_code += load_bias;
979 end_code += load_bias;
980 start_data += load_bias;
981 end_data += load_bias;
983 /* Calling set_brk effectively mmaps the pages that we need
984 * for the bss and break sections. We must do this before
985 * mapping in the interpreter, to make sure it doesn't wind
986 * up getting placed where the bss needs to go.
988 retval = set_brk(elf_bss, elf_brk);
990 send_sig(SIGKILL, current, 0);
991 goto out_free_dentry;
993 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
994 send_sig(SIGSEGV, current, 0);
995 retval = -EFAULT; /* Nobody gets to see this, but.. */
996 goto out_free_dentry;
999 if (elf_interpreter) {
1000 if (interpreter_type == INTERPRETER_AOUT)
1001 elf_entry = load_aout_interp(&loc->interp_ex,
1004 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1008 if (!BAD_ADDR(elf_entry)) {
1009 /* load_elf_interp() returns relocation adjustment */
1010 interp_load_addr = elf_entry;
1011 elf_entry += loc->interp_elf_ex.e_entry;
1014 if (BAD_ADDR(elf_entry)) {
1015 force_sig(SIGSEGV, current);
1016 retval = IS_ERR((void *)elf_entry) ?
1017 (int)elf_entry : -EINVAL;
1018 goto out_free_dentry;
1020 reloc_func_desc = interp_load_addr;
1022 allow_write_access(interpreter);
1024 kfree(elf_interpreter);
1026 elf_entry = loc->elf_ex.e_entry;
1027 if (BAD_ADDR(elf_entry)) {
1028 force_sig(SIGSEGV, current);
1030 goto out_free_dentry;
1034 if (interpreter_type != INTERPRETER_AOUT)
1035 sys_close(elf_exec_fileno);
1037 set_binfmt(&elf_format);
1039 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1040 retval = arch_setup_additional_pages(bprm, executable_stack,
1041 start_code, interp_map_addr);
1043 send_sig(SIGKILL, current, 0);
1046 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1050 compute_creds(bprm);
1051 current->flags &= ~PF_FORKNOEXEC;
1052 create_elf_tables(bprm, &loc->elf_ex,
1053 (interpreter_type == INTERPRETER_AOUT),
1054 load_addr, interp_load_addr);
1055 /* N.B. passed_fileno might not be initialized? */
1056 if (interpreter_type == INTERPRETER_AOUT)
1057 current->mm->arg_start += strlen(passed_fileno) + 1;
1058 current->mm->end_code = end_code;
1059 current->mm->start_code = start_code;
1060 current->mm->start_data = start_data;
1061 current->mm->end_data = end_data;
1062 current->mm->start_stack = bprm->p;
1064 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
1065 if (current->flags & PF_RANDOMIZE)
1066 randomize_brk(elf_brk);
1068 if (current->personality & MMAP_PAGE_ZERO) {
1069 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1070 and some applications "depend" upon this behavior.
1071 Since we do not have the power to recompile these, we
1072 emulate the SVr4 behavior. Sigh. */
1073 down_write(¤t->mm->mmap_sem);
1074 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1075 MAP_FIXED | MAP_PRIVATE, 0);
1076 up_write(¤t->mm->mmap_sem);
1079 #ifdef ELF_PLAT_INIT
1081 * The ABI may specify that certain registers be set up in special
1082 * ways (on i386 %edx is the address of a DT_FINI function, for
1083 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1084 * that the e_entry field is the address of the function descriptor
1085 * for the startup routine, rather than the address of the startup
1086 * routine itself. This macro performs whatever initialization to
1087 * the regs structure is required as well as any relocations to the
1088 * function descriptor entries when executing dynamically links apps.
1090 ELF_PLAT_INIT(regs, reloc_func_desc);
1093 start_thread(regs, elf_entry, bprm->p);
1094 if (unlikely(current->ptrace & PT_PTRACED)) {
1095 if (current->ptrace & PT_TRACE_EXEC)
1096 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
1098 send_sig(SIGTRAP, current, 0);
/* Error unwinding labels (partially elided by the extraction). */
1108 allow_write_access(interpreter);
1112 kfree(elf_interpreter);
1114 sys_close(elf_exec_fileno);
1117 put_files_struct(current->files);
1118 current->files = files;
1125 /* This is really simpleminded and specialized - we are loading an
1126 a.out library that is given an ELF header. */
/*
 * uselib(2) support: map the single PT_LOAD segment of an ELF-format
 * shared library at its fixed p_vaddr and extend/zero its bss.
 *
 * NOTE(review): lossy extraction -- returns, gotos and braces are
 * missing (embedded numbering is non-contiguous); "¤t" is a
 * mangled "&current".
 */
1127 static int load_elf_library(struct file *file)
1129 struct elf_phdr *elf_phdata;
1130 struct elf_phdr *eppnt;
1131 unsigned long elf_bss, bss, len;
1132 int retval, error, i, j;
1133 struct elfhdr elf_ex;
1136 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1137 if (retval != sizeof(elf_ex))
1140 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1143 /* First of all, some simple consistency checks */
1144 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1145 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1148 /* Now read in all of the header information */
1150 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1151 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1154 elf_phdata = kmalloc(j, GFP_KERNEL);
1160 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
/* Exactly one PT_LOAD segment is allowed for a uselib library. */
1164 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1165 if ((eppnt + i)->p_type == PT_LOAD)
1170 while (eppnt->p_type != PT_LOAD)
1173 /* Now use mmap to map the library into memory. */
1174 down_write(¤t->mm->mmap_sem);
1175 error = do_mmap(file,
1176 ELF_PAGESTART(eppnt->p_vaddr),
1178 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1179 PROT_READ | PROT_WRITE | PROT_EXEC,
1180 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1182 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1183 up_write(¤t->mm->mmap_sem);
1184 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1187 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1188 if (padzero(elf_bss)) {
1193 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1195 bss = eppnt->p_memsz + eppnt->p_vaddr;
1197 down_write(¤t->mm->mmap_sem);
1198 do_brk(len, bss - len);
1199 up_write(¤t->mm->mmap_sem);
1210 * Note that some platforms still use traditional core dumps and not
1211 * the ELF core dump. Each platform can select it as appropriate.
1213 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1218 * Modelled on fs/exec.c:aout_core_dump()
1219 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1222 * These are the only things you should do on a core-file: use only these
1223 * functions to write out all the necessary info.
1225 static int dump_write(struct file *file, const void *addr, int nr)
1227 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1230 static int dump_seek(struct file *file, loff_t off)
1232 if (file->f_op->llseek) {
1233 if (file->f_op->llseek(file, off, 0) != off)
1241 * Decide whether a segment is worth dumping; default is yes to be
1242 * sure (missing info is worse than too much; etc).
1243 * Personally I'd include everything, and use the coredump limit...
1245 * I think we should skip something. But I am not sure how. H.J.
1247 static int maydump(struct vm_area_struct *vma)
1249 /* Do not dump I/O mapped devices or special mappings */
1250 if (vma->vm_flags & (VM_IO | VM_RESERVED))
1253 if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
1256 /* Dump shared memory only if mapped from an anonymous file. */
1257 if (vma->vm_flags & VM_SHARED)
1258 return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
1260 /* If it hasn't been written to, don't write it out */
1267 /* An ELF note in memory */
1272 unsigned int datasz;
1276 static int notesize(struct memelfnote *en)
1280 sz = sizeof(struct elf_note);
1281 sz += roundup(strlen(en->name) + 1, 4);
1282 sz += roundup(en->datasz, 4);
/* writenote() helpers: abort (return 0) on any short write or failed seek. */
1287 #define DUMP_WRITE(addr, nr) \
1288 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1289 #define DUMP_SEEK(off) \
1290 do { if (!dump_seek(file, (off))) return 0; } while(0)
1292 static int writenote(struct memelfnote *men, struct file *file)
1296 en.n_namesz = strlen(men->name) + 1;
1297 en.n_descsz = men->datasz;
1298 en.n_type = men->type;
1300 DUMP_WRITE(&en, sizeof(en));
1301 DUMP_WRITE(men->name, en.n_namesz);
1302 /* XXX - cast from long long to long to avoid need for libgcc.a */
1303 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1304 DUMP_WRITE(men->data, men->datasz);
1305 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
/* Core-dumper variants: also enforce the coredump size limit via 'size'. */
1312 #define DUMP_WRITE(addr, nr) \
1313 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1315 #define DUMP_SEEK(off) \
1316 if (!dump_seek(file, (off))) \
/*
 * fill_elf_header - initialize the ELF header of the core file: magic
 * and ident bytes, ET_CORE type, target architecture, and 'segs'
 * program headers placed immediately after the header. Section-header
 * fields are zeroed -- a core file carries no sections.
 * NOTE(review): several assignments (e.g. e_entry, e_shoff, e_shnum)
 * and the closing brace are missing from this extract.
 */
1319 static void fill_elf_header(struct elfhdr *elf, int segs)
1321 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1322 elf->e_ident[EI_CLASS] = ELF_CLASS;
1323 elf->e_ident[EI_DATA] = ELF_DATA;
1324 elf->e_ident[EI_VERSION] = EV_CURRENT;
1325 elf->e_ident[EI_OSABI] = ELF_OSABI;
/* Zero the remaining ident padding bytes. */
1326 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1328 elf->e_type = ET_CORE;
1329 elf->e_machine = ELF_ARCH;
1330 elf->e_version = EV_CURRENT;
/* Program header table starts right after the ELF header. */
1332 elf->e_phoff = sizeof(struct elfhdr);
1334 elf->e_flags = ELF_CORE_EFLAGS;
1335 elf->e_ehsize = sizeof(struct elfhdr);
1336 elf->e_phentsize = sizeof(struct elf_phdr);
1337 elf->e_phnum = segs;
1338 elf->e_shentsize = 0;
1340 elf->e_shstrndx = 0;
/*
 * fill_elf_note_phdr - describe the notes area as a PT_NOTE program
 * header at file offset 'offset' covering 'sz' bytes.
 * NOTE(review): the zeroing of the remaining phdr fields (p_vaddr,
 * p_memsz, p_flags, p_align, ...) is not visible in this extract.
 */
1344 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1346 phdr->p_type = PT_NOTE;
1347 phdr->p_offset = offset;
1350 phdr->p_filesz = sz;
/*
 * fill_note - populate a memelfnote with the given name, note type,
 * and 'sz' bytes of descriptor data.
 * NOTE(review): the function body is entirely missing from this
 * extract; only the signature survives.
 */
1357 static void fill_note(struct memelfnote *note, const char *name, int type,
1358 unsigned int sz, void *data)
1368 * fill up all the fields in prstatus from the given task struct, except
1369 * registers which need to be filled up separately.
1371 static void fill_prstatus(struct elf_prstatus *prstatus,
1372 struct task_struct *p, long signr)
/* Signal that triggered the dump, plus pending/blocked signal masks. */
1374 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1375 prstatus->pr_sigpend = p->pending.signal.sig[0];
1376 prstatus->pr_sighold = p->blocked.sig[0];
1377 prstatus->pr_pid = p->pid;
1378 prstatus->pr_ppid = p->parent->pid;
1379 prstatus->pr_pgrp = process_group(p);
1380 prstatus->pr_sid = p->signal->session;
1381 if (thread_group_leader(p)) {
1383 * This is the record for the group leader. Add in the
1384 * cumulative times of previous dead threads. This total
1385 * won't include the time of each live thread whose state
1386 * is included in the core dump. The final total reported
1387 * to our parent process when it calls wait4 will include
1388 * those sums as well as the little bit more time it takes
1389 * this and each other thread to finish dying after the
1390 * core dump synchronization phase.
1392 cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
1393 &prstatus->pr_utime);
1394 cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
1395 &prstatus->pr_stime);
/* NOTE(review): the '} else {' separating the non-leader branch is
 * missing from this extract; the two lines below are that branch. */
1397 cputime_to_timeval(p->utime, &prstatus->pr_utime);
1398 cputime_to_timeval(p->stime, &prstatus->pr_stime);
/* Accumulated times of reaped children. */
1400 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1401 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
/*
 * fill_psinfo - build the NT_PRPSINFO note payload for task 'p':
 * command-line arguments (copied from user memory), ids, run state,
 * nice value, flags, credentials, and command name.
 * NOTE(review): the closing brace and return statements (including the
 * error return after a failed copy_from_user) are missing from this
 * extract.
 */
1404 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1405 struct mm_struct *mm)
1407 unsigned int i, len;
1409 /* first copy the parameters from user space */
1410 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
/* Argument block lives in user memory between arg_start and arg_end;
 * clamp to the fixed pr_psargs buffer, reserving a byte for the NUL. */
1412 len = mm->arg_end - mm->arg_start;
1413 if (len >= ELF_PRARGSZ)
1414 len = ELF_PRARGSZ-1;
1415 if (copy_from_user(&psinfo->pr_psargs,
1416 (const char __user *)mm->arg_start, len))
/* argv strings are NUL-separated in memory; flatten to one
 * space-separated string for the note. */
1418 for(i = 0; i < len; i++)
1419 if (psinfo->pr_psargs[i] == 0)
1420 psinfo->pr_psargs[i] = ' ';
1421 psinfo->pr_psargs[len] = 0;
1423 psinfo->pr_pid = p->pid;
1424 psinfo->pr_ppid = p->parent->pid;
1425 psinfo->pr_pgrp = process_group(p);
1426 psinfo->pr_sid = p->signal->session;
/* Index of the lowest set state bit, or 0 for TASK_RUNNING; mapped to
 * the classic ps state letter, '.' for anything past 'W'. */
1428 i = p->state ? ffz(~p->state) + 1 : 0;
1429 psinfo->pr_state = i;
1430 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1431 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1432 psinfo->pr_nice = task_nice(p);
1433 psinfo->pr_flag = p->flags;
1434 SET_UID(psinfo->pr_uid, p->uid);
1435 SET_GID(psinfo->pr_gid, p->gid);
1436 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1441 /* Here is the structure in which status of each thread is captured. */
/* One entry per non-dumping thread sharing the mm; linked into the
 * thread_list of elf_core_dump(). notes[] holds up to three per-thread
 * notes (PRSTATUS, PRFPREG, and optionally PRXFPREG).
 * NOTE(review): the struct's braces, the num_notes member referenced
 * by elf_core_dump(), and the closing '};' are missing from this
 * extract. */
1442 struct elf_thread_status
1444 struct list_head list;
1445 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1446 elf_fpregset_t fpu; /* NT_PRFPREG */
1447 struct task_struct *thread;
1448 #ifdef ELF_CORE_COPY_XFPREGS
1449 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1451 struct memelfnote notes[3];
1456 * In order to add the specific thread information for the elf file format,
1457 * we need to keep a linked list of every threads pr_status and then create
1458 * a single section for them in the final core file.
/* Fills t's prstatus/fpu(/xfpu) from the thread's task_struct, builds
 * the corresponding notes, and accumulates their sizes in 'sz'.
 * NOTE(review): the declaration of 'sz', the num_notes bookkeeping,
 * the 'return sz;', and several braces are missing from this extract. */
1460 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1463 struct task_struct *p = t->thread;
1466 fill_prstatus(&t->prstatus, p, signr);
1467 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1469 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1472 sz += notesize(&t->notes[0]);
/* FPU note only when the thread actually has FPU state. */
1474 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1476 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1479 sz += notesize(&t->notes[1]);
1482 #ifdef ELF_CORE_COPY_XFPREGS
1483 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1484 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
1487 sz += notesize(&t->notes[2]);
1496 * This is a two-pass process; first we find the offsets of the bits,
1497 * and then they are actually written out. If we run out of core limit
/* NOTE(review): this function is heavily elided in this extract --
 * many declarations (i, segs, size, numnote, auxv, page, kaddr, ...),
 * error-path 'goto' targets, cleanup/end_coredump labels, and closing
 * braces are missing. Code lines below are kept byte-identical;
 * comments mark the visible phases. */
1500 static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
1508 struct vm_area_struct *vma;
1509 struct elfhdr *elf = NULL;
1510 off_t offset = 0, dataoff;
/* RLIMIT_CORE cap; enforced by the limit-checking DUMP_WRITE macro. */
1511 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1513 struct memelfnote *notes = NULL;
1514 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1515 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1516 struct task_struct *g, *p;
1517 LIST_HEAD(thread_list);
1518 struct list_head *t;
1519 elf_fpregset_t *fpu = NULL;
1520 #ifdef ELF_CORE_COPY_XFPREGS
1521 elf_fpxregset_t *xfpu = NULL;
1523 int thread_status_size = 0;
1527 * We no longer stop all VM operations.
1529 * This is because those proceses that could possibly change map_count
1530 * or the mmap / vma pages are now blocked in do_exit on current
1531 * finishing this core dump.
1533 * Only ptrace can touch these memory addresses, but it doesn't change
1534 * the map_count or the pages allocated. So no possibility of crashing
1535 * exists while dumping the mm->vm_next areas to the core file.
1538 /* alloc memory for large data structures: too large to be on stack */
1539 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1542 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1545 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1548 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1551 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1554 #ifdef ELF_CORE_COPY_XFPREGS
1555 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
/* Phase: collect status of every other thread sharing this mm.
 * GFP_ATOMIC because allocation happens under tasklist_lock. */
1561 struct elf_thread_status *tmp;
1562 read_lock(&tasklist_lock);
1564 if (current->mm == p->mm && current != p) {
1565 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
1567 read_unlock(&tasklist_lock);
1570 INIT_LIST_HEAD(&tmp->list);
1572 list_add(&tmp->list, &thread_list);
1574 while_each_thread(g,p);
1575 read_unlock(&tasklist_lock);
1576 list_for_each(t, &thread_list) {
1577 struct elf_thread_status *tmp;
1580 tmp = list_entry(t, struct elf_thread_status, list);
1581 sz = elf_dump_thread_status(signr, tmp);
1582 thread_status_size += sz;
1585 /* now collect the dump for the current */
1586 memset(prstatus, 0, sizeof(*prstatus));
1587 fill_prstatus(prstatus, current, signr);
1588 elf_core_copy_regs(&prstatus->pr_reg, regs);
/* One PT_LOAD phdr per vma, plus arch extras, plus one PT_NOTE. */
1590 segs = current->mm->map_count;
1591 #ifdef ELF_CORE_EXTRA_PHDRS
1592 segs += ELF_CORE_EXTRA_PHDRS;
1596 fill_elf_header(elf, segs + 1); /* including notes section */
1599 current->flags |= PF_DUMPCORE;
1602 * Set up the notes in similar form to SVR4 core dumps made
1603 * with info from their /proc.
1606 fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1607 fill_psinfo(psinfo, current->group_leader, current->mm);
1608 fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
/* Auxiliary vector note: scan saved_auxv until the AT_NULL pair. */
1612 auxv = (elf_addr_t *)current->mm->saved_auxv;
1617 while (auxv[i - 2] != AT_NULL);
/* NOTE(review): '¬es' below is mojibake for '&notes' (an HTML-entity
 * mangling of the extraction); left byte-identical per review rules. */
1618 fill_note(¬es[numnote++], "CORE", NT_AUXV,
1619 i * sizeof(elf_addr_t), auxv);
1621 /* Try to dump the FPU. */
1622 if ((prstatus->pr_fpvalid =
1623 elf_core_copy_task_fpregs(current, regs, fpu)))
1624 fill_note(notes + numnote++,
1625 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1626 #ifdef ELF_CORE_COPY_XFPREGS
1627 if (elf_core_copy_task_xfpregs(current, xfpu))
1628 fill_note(notes + numnote++,
1629 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
/* Phase: write ELF header, then account for the phdr table size. */
1635 DUMP_WRITE(elf, sizeof(*elf));
1636 offset += sizeof(*elf); /* Elf header */
1637 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1639 /* Write notes phdr entry */
1641 struct elf_phdr phdr;
1644 for (i = 0; i < numnote; i++)
1645 sz += notesize(notes + i);
1647 sz += thread_status_size;
1649 fill_elf_note_phdr(&phdr, sz, offset);
1651 DUMP_WRITE(&phdr, sizeof(phdr));
1654 /* Page-align dumped data */
1655 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1657 /* Write program headers for segments dump */
1658 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1659 struct elf_phdr phdr;
1662 sz = vma->vm_end - vma->vm_start;
1664 phdr.p_type = PT_LOAD;
1665 phdr.p_offset = offset;
1666 phdr.p_vaddr = vma->vm_start;
/* Segments maydump() rejects still get a phdr, with p_filesz == 0. */
1668 phdr.p_filesz = maydump(vma) ? sz : 0;
1670 offset += phdr.p_filesz;
1671 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1672 if (vma->vm_flags & VM_WRITE)
1673 phdr.p_flags |= PF_W;
1674 if (vma->vm_flags & VM_EXEC)
1675 phdr.p_flags |= PF_X;
1676 phdr.p_align = ELF_EXEC_PAGESIZE;
1678 DUMP_WRITE(&phdr, sizeof(phdr));
1681 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1682 ELF_CORE_WRITE_EXTRA_PHDRS;
1685 /* write out the notes section */
1686 for (i = 0; i < numnote; i++)
1687 if (!writenote(notes + i, file))
1690 /* write out the thread status notes section */
1691 list_for_each(t, &thread_list) {
1692 struct elf_thread_status *tmp =
1693 list_entry(t, struct elf_thread_status, list);
1695 for (i = 0; i < tmp->num_notes; i++)
1696 if (!writenote(&tmp->notes[i], file))
/* Phase: dump memory contents page by page; unmappable and zero pages
 * become holes (seek forward) instead of written data. */
1702 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1708 for (addr = vma->vm_start;
1710 addr += PAGE_SIZE) {
1712 struct vm_area_struct *vma;
1714 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1715 &page, &vma) <= 0) {
1716 DUMP_SEEK(file->f_pos + PAGE_SIZE);
1718 if (page == ZERO_PAGE(addr)) {
1719 DUMP_SEEK(file->f_pos + PAGE_SIZE);
1722 flush_cache_page(vma, addr,
1725 if ((size += PAGE_SIZE) > limit ||
1726 !dump_write(file, kaddr,
1729 page_cache_release(page);
1734 page_cache_release(page);
1739 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1740 ELF_CORE_WRITE_EXTRA_DATA;
/* Sanity check: actual file position should match the precomputed
 * layout offset; mismatch indicates a bookkeeping bug. */
1743 if ((off_t)file->f_pos != offset) {
1746 "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1747 (off_t)file->f_pos, offset);
/* Cleanup: free the per-thread status list and the kmalloc'd buffers.
 * NOTE(review): the kfree calls for elf/prstatus/psinfo/notes/fpu are
 * not visible in this extract. */
1754 while (!list_empty(&thread_list)) {
1755 struct list_head *tmp = thread_list.next;
1757 kfree(list_entry(tmp, struct elf_thread_status, list));
1765 #ifdef ELF_CORE_COPY_XFPREGS
1772 #endif /* USE_ELF_CORE_DUMP */
/* Module init: register the ELF binary-format handler.
 * NOTE(review): function braces are missing from this extract. */
1774 static int __init init_elf_binfmt(void)
1776 return register_binfmt(&elf_format);
/* Module exit: unregister the ELF binary-format handler.
 * NOTE(review): function braces are missing from this extract. */
1779 static void __exit exit_elf_binfmt(void)
1781 /* Remove the COFF and ELF loaders. */
1782 unregister_binfmt(&elf_format);
/* Register init early via core_initcall so ELF binaries are loadable
 * as soon as possible during boot; standard module exit and license. */
1785 core_initcall(init_elf_binfmt);
1786 module_exit(exit_elf_binfmt);
1787 MODULE_LICENSE("GPL");