/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/random.h>
41 #include <linux/elf.h>
42 #include <linux/vs_memory.h>
43 #include <linux/vs_cvirt.h>
44 #include <asm/uaccess.h>
45 #include <asm/param.h>
48 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
49 static int load_elf_library(struct file *);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
62 static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
64 #define elf_core_dump NULL
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 #define ELF_MIN_ALIGN PAGE_SIZE
73 #ifndef ELF_CORE_EFLAGS
74 #define ELF_CORE_EFLAGS 0
77 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
78 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
79 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
81 static struct linux_binfmt elf_format = {
82 .module = THIS_MODULE,
83 .load_binary = load_elf_binary,
84 .load_shlib = load_elf_library,
85 .core_dump = elf_core_dump,
86 .min_coredump = ELF_EXEC_PAGESIZE
89 #define BAD_ADDR(x) ((unsigned long)(x) >= PAGE_MASK)
91 static int set_brk(unsigned long start, unsigned long end)
93 start = ELF_PAGEALIGN(start);
94 end = ELF_PAGEALIGN(end);
97 down_write(¤t->mm->mmap_sem);
98 addr = do_brk(start, end - start);
99 up_write(¤t->mm->mmap_sem);
103 current->mm->start_brk = current->mm->brk = end;
107 /* We need to explicitly zero any fractional pages
108 after the data section (i.e. bss). This would
109 contain the junk from the file that should not
112 static int padzero(unsigned long elf_bss)
116 nbyte = ELF_PAGEOFFSET(elf_bss);
118 nbyte = ELF_MIN_ALIGN - nbyte;
119 if (clear_user((void __user *) elf_bss, nbyte))
/* Let's use some macros to make this stack manipulation a little clearer.
 * STACK_ADD/STACK_ROUND/STACK_ALLOC hide the direction of stack growth:
 * on CONFIG_STACK_GROWSUP the stack grows toward higher addresses. */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
/*
 * create_elf_tables() -- lay out the initial userspace stack for a new ELF
 * image: platform string, the auxiliary vector (built in
 * current->mm->saved_auxv, then copied out), argc, argv[] and envp[]
 * pointer arrays, and the argument/environment string bookkeeping in
 * current->mm (arg_start/arg_end/env_start/env_end).
 *
 * NOTE(review): this chunk is a line-sampled extraction — each line still
 * carries its original file line number and interior lines are missing.
 * Code is kept byte-identical below; only comments are added.
 */
141 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
142 int interp_aout, unsigned long load_addr,
143 unsigned long interp_load_addr)
145 unsigned long p = bprm->p;
146 int argc = bprm->argc;
147 int envc = bprm->envc;
148 elf_addr_t __user *argv;
149 elf_addr_t __user *envp;
150 elf_addr_t __user *sp;
151 elf_addr_t __user *u_platform;
152 const char *k_platform = ELF_PLATFORM;
154 elf_addr_t *elf_info;
156 struct task_struct *tsk = current;
159 * If this architecture has a platform capability string, copy it
160 * to userspace. In some cases (Sparc), this info is impossible
161 * for userspace to get any other way, in others (i386) it is
166 size_t len = strlen(k_platform) + 1;
169 * In some cases (e.g. Hyper-Threading), we want to avoid L1
170 * evictions by the processes running on the same package. One
171 * thing we can do is to shuffle the initial stack for them.
174 p = arch_align_stack(p);
176 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
177 if (__copy_to_user(u_platform, k_platform, len))
181 /* Create the ELF interpreter info */
182 elf_info = (elf_addr_t *)current->mm->saved_auxv;
/* NEW_AUX_ENT appends one (id, value) pair to the auxv scratch array. */
183 #define NEW_AUX_ENT(id, val) \
185 elf_info[ei_index++] = id; \
186 elf_info[ei_index++] = val; \
191 * ARCH_DLINFO must come first so PPC can do its special alignment of
196 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
197 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
198 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
199 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
200 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
201 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
202 NEW_AUX_ENT(AT_BASE, interp_load_addr);
203 NEW_AUX_ENT(AT_FLAGS, 0);
204 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
205 NEW_AUX_ENT(AT_UID, tsk->uid);
206 NEW_AUX_ENT(AT_EUID, tsk->euid);
207 NEW_AUX_ENT(AT_GID, tsk->gid);
208 NEW_AUX_ENT(AT_EGID, tsk->egid);
/* AT_SECURE tells the dynamic linker to ignore unsafe env (set-id exec). */
209 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
211 NEW_AUX_ENT(AT_PLATFORM,
212 (elf_addr_t)(unsigned long)u_platform);
214 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
215 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
218 /* AT_NULL is zero; clear the rest too */
219 memset(&elf_info[ei_index], 0,
220 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
222 /* And advance past the AT_NULL entry. */
/* Reserve stack room for auxv plus the argc/argv/envp pointer block,
 * 16-byte aligning the final stack pointer via STACK_ROUND. */
225 sp = STACK_ADD(p, ei_index);
227 items = (argc + 1) + (envc + 1);
229 items += 3; /* a.out interpreters require argv & envp too */
231 items += 1; /* ELF interpreters only put argc on the stack */
233 bprm->p = STACK_ROUND(sp, items);
235 /* Point sp at the lowest address on the stack */
236 #ifdef CONFIG_STACK_GROWSUP
237 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
238 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
240 sp = (elf_addr_t __user *)bprm->p;
243 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
244 if (__put_user(argc, sp++))
248 envp = argv + argc + 1;
249 __put_user((elf_addr_t)(unsigned long)argv, sp++);
250 __put_user((elf_addr_t)(unsigned long)envp, sp++);
253 envp = argv + argc + 1;
256 /* Populate argv and envp */
/* The strings themselves were copied earlier by copy_strings(); here we
 * only write the pointer arrays and record the string-area boundaries. */
257 p = current->mm->arg_end = current->mm->arg_start;
260 __put_user((elf_addr_t)p, argv++);
261 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
262 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
266 if (__put_user(0, argv))
268 current->mm->arg_end = current->mm->env_start = p;
271 __put_user((elf_addr_t)p, envp++);
272 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
273 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
277 if (__put_user(0, envp))
279 current->mm->env_end = p;
281 /* Put the elf_info on the stack in the right place. */
282 sp = (elf_addr_t __user *)envp + 1;
283 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
290 static unsigned long elf_map(struct file *filep, unsigned long addr,
291 struct elf_phdr *eppnt, int prot, int type,
292 unsigned long total_size)
294 unsigned long map_addr;
295 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
296 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
298 addr = ELF_PAGESTART(addr);
299 size = ELF_PAGEALIGN(size);
301 /* mmap() will return -EINVAL if given a zero size, but a
302 * segment with zero filesize is perfectly valid */
306 down_write(¤t->mm->mmap_sem);
308 * total_size is the size of the ELF (interpreter) image.
309 * The _first_ mmap needs to know the full size, otherwise
310 * randomization might put this image into an overlapping
311 * position with the ELF binary image. (since size < total_size)
312 * So we first map the 'big' image - and unmap the remainder at
313 * the end. (which unmap is needed for ELF images with holes.)
316 total_size = ELF_PAGEALIGN(total_size);
317 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
318 if (!BAD_ADDR(map_addr))
319 do_munmap(current->mm, map_addr+size, total_size-size);
321 map_addr = do_mmap(filep, addr, size, prot, type, off);
323 up_write(¤t->mm->mmap_sem);
327 #endif /* !elf_map */
329 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
331 int i, first_idx = -1, last_idx = -1;
333 for (i = 0; i < nr; i++)
334 if (cmds[i].p_type == PT_LOAD) {
343 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
344 ELF_PAGESTART(cmds[first_idx].p_vaddr);
/* This is much more generalized than the library routine read function,
   so we keep this separate. Technically the library read function
   is only provided so that we can read a.out libraries that have
/*
 * load_elf_interp() -- map the ELF program interpreter (e.g. ld.so) into
 * the current mm and return the relocation/load adjustment (or an error
 * address testable with BAD_ADDR).  *interp_map_addr receives the address
 * of the first mapping made.
 *
 * NOTE(review): line-sampled extraction; original line numbers remain in
 * the text and interior lines are missing.  Code kept byte-identical.
 * NOTE(review): "¤t" below is a mojibake of "&current" — needs repair.
 */
353 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
354 struct file *interpreter, unsigned long *interp_map_addr,
355 unsigned long no_base)
357 struct elf_phdr *elf_phdata;
358 struct elf_phdr *eppnt;
359 unsigned long load_addr = 0;
360 int load_addr_set = 0;
361 unsigned long last_bss = 0, elf_bss = 0;
362 unsigned long error = ~0UL;
363 unsigned long total_size;
366 /* First of all, some simple consistency checks */
367 if (interp_elf_ex->e_type != ET_EXEC &&
368 interp_elf_ex->e_type != ET_DYN)
370 if (!elf_check_arch(interp_elf_ex))
372 if (!interpreter->f_op || !interpreter->f_op->mmap)
376 * If the size of this structure has changed, then punt, since
377 * we will be doing the wrong thing.
379 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the kmalloc below cannot be driven past ELF_MIN_ALIGN. */
381 if (interp_elf_ex->e_phnum < 1 ||
382 interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
385 /* Now read in all of the header information */
386 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
387 if (size > ELF_MIN_ALIGN)
389 elf_phdata = kmalloc(size, GFP_KERNEL);
393 retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
394 (char *)elf_phdata,size);
396 if (retval != size) {
/* Full image span: passed to elf_map() so the first mmap reserves the
 * whole interpreter image (see elf_map for why). */
402 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
407 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
408 if (eppnt->p_type == PT_LOAD) {
409 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
411 unsigned long vaddr = 0;
412 unsigned long k, map_addr;
414 if (eppnt->p_flags & PF_R)
415 elf_prot = PROT_READ;
416 if (eppnt->p_flags & PF_W)
417 elf_prot |= PROT_WRITE;
418 if (eppnt->p_flags & PF_X)
419 elf_prot |= PROT_EXEC;
420 vaddr = eppnt->p_vaddr;
421 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
422 elf_type |= MAP_FIXED;
423 else if (no_base && interp_elf_ex->e_type == ET_DYN)
426 map_addr = elf_map(interpreter, load_addr + vaddr,
427 eppnt, elf_prot, elf_type, total_size);
429 if (!*interp_map_addr)
430 *interp_map_addr = map_addr;
432 if (BAD_ADDR(map_addr))
/* First ET_DYN segment fixes the load bias for all later segments. */
435 if (!load_addr_set &&
436 interp_elf_ex->e_type == ET_DYN) {
437 load_addr = map_addr - ELF_PAGESTART(vaddr);
442 * Check to see if the section's size will overflow the
443 * allowed task size. Note that p_filesz must always be
444 * <= p_memsize so it's only necessary to check p_memsz.
446 k = load_addr + eppnt->p_vaddr;
448 eppnt->p_filesz > eppnt->p_memsz ||
449 eppnt->p_memsz > TASK_SIZE ||
450 TASK_SIZE - eppnt->p_memsz < k) {
456 * Find the end of the file mapping for this phdr, and
457 * keep track of the largest address we see for this.
459 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
464 * Do the same thing for the memory mapping - between
465 * elf_bss and last_bss is the bss section.
467 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
474 * Now fill out the bss section. First pad the last page up
475 * to the page boundary, and then perform a mmap to make sure
476 * that there are zero-mapped pages up to and including the
479 if (padzero(elf_bss)) {
484 /* What we have mapped so far */
485 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
487 /* Map the last of the bss segment */
488 if (last_bss > elf_bss) {
489 down_write(¤t->mm->mmap_sem);
490 error = do_brk(elf_bss, last_bss - elf_bss);
491 up_write(¤t->mm->mmap_sem);
504 static unsigned long load_aout_interp(struct exec *interp_ex,
505 struct file *interpreter)
507 unsigned long text_data, elf_entry = ~0UL;
511 current->mm->end_code = interp_ex->a_text;
512 text_data = interp_ex->a_text + interp_ex->a_data;
513 current->mm->end_data = text_data;
514 current->mm->brk = interp_ex->a_bss + text_data;
516 switch (N_MAGIC(*interp_ex)) {
519 addr = (char __user *)0;
523 offset = N_TXTOFF(*interp_ex);
524 addr = (char __user *)N_TXTADDR(*interp_ex);
530 down_write(¤t->mm->mmap_sem);
531 do_brk(0, text_data);
532 up_write(¤t->mm->mmap_sem);
533 if (!interpreter->f_op || !interpreter->f_op->read)
535 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
537 flush_icache_range((unsigned long)addr,
538 (unsigned long)addr + text_data);
540 down_write(¤t->mm->mmap_sem);
541 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
543 up_write(¤t->mm->mmap_sem);
544 elf_entry = interp_ex->a_entry;
551 * These are the functions used to load ELF style executables and shared
552 * libraries. There is no binary dependent code anywhere else.
555 #define INTERPRETER_NONE 0
556 #define INTERPRETER_AOUT 1
557 #define INTERPRETER_ELF 2
559 #ifndef STACK_RND_MASK
560 #define STACK_RND_MASK 0x7ff /* with 4K pages 8MB of VA */
563 static unsigned long randomize_stack_top(unsigned long stack_top)
565 unsigned int random_variable = 0;
567 if (current->flags & PF_RANDOMIZE) {
568 random_variable = get_random_int() & STACK_RND_MASK;
569 random_variable <<= PAGE_SHIFT;
571 #ifdef CONFIG_STACK_GROWSUP
572 return PAGE_ALIGN(stack_top) + random_variable;
574 return PAGE_ALIGN(stack_top) - random_variable;
/*
 * load_elf_binary() -- the binfmt entry point that execs an ELF image:
 * validates the headers, finds and loads PT_INTERP (ELF or a.out
 * interpreter), flushes the old executable, maps every PT_LOAD segment,
 * sets up brk/bss, builds the stack via create_elf_tables(), and finally
 * transfers control with start_thread().
 *
 * NOTE(review): line-sampled extraction — each line carries its original
 * file line number and interior lines (error labels, some conditions)
 * are missing.  Code kept byte-identical; comments only are added.
 * NOTE(review): "¤t" below is a mojibake of "&current" — needs repair.
 * This is a patched tree (exec_shield, vserver); do not "fix" toward
 * mainline without checking the patch set.
 */
578 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
580 struct file *interpreter = NULL; /* to shut gcc up */
581 unsigned long load_addr = 0, load_bias = 0;
582 int load_addr_set = 0;
583 char * elf_interpreter = NULL;
584 unsigned int interpreter_type = INTERPRETER_NONE;
585 unsigned char ibcs2_interpreter = 0;
587 struct elf_phdr *elf_ppnt, *elf_phdata;
588 unsigned long elf_bss, elf_brk;
592 unsigned long elf_entry, interp_load_addr = 0, interp_map_addr = 0;
593 unsigned long start_code, end_code, start_data, end_data;
594 unsigned long reloc_func_desc = 0;
595 char passed_fileno[6];
596 struct files_struct *files;
597 int have_pt_gnu_stack, executable_stack;
598 unsigned long def_flags = 0;
600 struct elfhdr elf_ex;
601 struct elfhdr interp_elf_ex;
602 struct exec interp_ex;
605 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
611 /* Get the exec-header */
612 loc->elf_ex = *((struct elfhdr *)bprm->buf);
615 /* First of all, some simple consistency checks */
616 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
619 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
621 if (!elf_check_arch(&loc->elf_ex))
623 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
626 /* Now read in all of the header information */
627 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the program-header kmalloc below stays sane. */
629 if (loc->elf_ex.e_phnum < 1 ||
630 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
632 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
634 elf_phdata = kmalloc(size, GFP_KERNEL);
638 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
639 (char *)elf_phdata, size);
640 if (retval != size) {
646 files = current->files; /* Refcounted so ok */
647 retval = unshare_files();
650 if (files == current->files) {
651 put_files_struct(files);
655 /* exec will make our files private anyway, but for the a.out
656 loader stuff we need to do it earlier */
657 retval = get_unused_fd();
660 get_file(bprm->file);
661 fd_install(elf_exec_fileno = retval, bprm->file);
663 elf_ppnt = elf_phdata;
/* Pass 1 over the program headers: find PT_INTERP and open the
 * program interpreter, if any. */
672 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
673 if (elf_ppnt->p_type == PT_INTERP) {
674 /* This is the program interpreter used for
675 * shared libraries - for now assume that this
676 * is an a.out format binary
679 if (elf_ppnt->p_filesz > PATH_MAX ||
680 elf_ppnt->p_filesz < 2)
684 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
686 if (!elf_interpreter)
689 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
692 if (retval != elf_ppnt->p_filesz) {
695 goto out_free_interp;
697 /* make sure path is NULL terminated */
699 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
700 goto out_free_interp;
702 /* If the program interpreter is one of these two,
703 * then assume an iBCS2 image. Otherwise assume
704 * a native linux image.
706 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
707 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
708 ibcs2_interpreter = 1;
711 * The early SET_PERSONALITY here is so that the lookup
712 * for the interpreter happens in the namespace of the
713 * to-be-execed image. SET_PERSONALITY can select an
716 * However, SET_PERSONALITY is NOT allowed to switch
717 * this task into the new images's memory mapping
718 * policy - that is, TASK_SIZE must still evaluate to
719 * that which is appropriate to the execing application.
720 * This is because exit_mmap() needs to have TASK_SIZE
721 * evaluate to the size of the old image.
723 * So if (say) a 64-bit application is execing a 32-bit
724 * application it is the architecture's responsibility
725 * to defer changing the value of TASK_SIZE until the
726 * switch really is going to happen - do this in
727 * flush_thread(). - akpm
729 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
731 interpreter = open_exec(elf_interpreter);
732 retval = PTR_ERR(interpreter);
733 if (IS_ERR(interpreter))
734 goto out_free_interp;
735 retval = kernel_read(interpreter, 0, bprm->buf,
737 if (retval != BINPRM_BUF_SIZE) {
740 goto out_free_dentry;
743 /* Get the exec headers */
744 loc->interp_ex = *((struct exec *)bprm->buf);
745 loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
/* Pass 2: look for PT_GNU_STACK to decide stack executability. */
751 elf_ppnt = elf_phdata;
752 executable_stack = EXSTACK_DEFAULT;
754 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
755 if (elf_ppnt->p_type == PT_GNU_STACK) {
756 if (elf_ppnt->p_flags & PF_X)
757 executable_stack = EXSTACK_ENABLE_X;
759 executable_stack = EXSTACK_DISABLE_X;
762 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
/* exec-shield patch: forcibly disable stack exec + enable randomization. */
764 if (current->personality == PER_LINUX && (exec_shield & 2)) {
765 executable_stack = EXSTACK_DISABLE_X;
766 current->flags |= PF_RANDOMIZE;
769 /* Some simple consistency checks for the interpreter */
770 if (elf_interpreter) {
771 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
773 /* Now figure out which format our binary is */
774 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
775 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
776 (N_MAGIC(loc->interp_ex) != QMAGIC))
777 interpreter_type = INTERPRETER_ELF;
779 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
780 interpreter_type &= ~INTERPRETER_ELF;
783 if (!interpreter_type)
784 goto out_free_dentry;
786 /* Make sure only one type was selected */
787 if ((interpreter_type & INTERPRETER_ELF) &&
788 interpreter_type != INTERPRETER_ELF) {
789 // FIXME - ratelimit this before re-enabling
790 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
791 interpreter_type = INTERPRETER_ELF;
793 /* Verify the interpreter has a valid arch */
794 if ((interpreter_type == INTERPRETER_ELF) &&
795 !elf_check_arch(&loc->interp_elf_ex))
796 goto out_free_dentry;
798 /* Executables without an interpreter also need a personality */
799 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
802 /* OK, we are done with that, now set up the arg stuff,
803 and then start this sucker up */
804 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
805 char *passed_p = passed_fileno;
806 sprintf(passed_fileno, "%d", elf_exec_fileno);
808 if (elf_interpreter) {
809 retval = copy_strings_kernel(1, &passed_p, bprm);
811 goto out_free_dentry;
816 /* Flush all traces of the currently running executable */
817 retval = flush_old_exec(bprm);
819 goto out_free_dentry;
/* From here on failure must kill the task: the old image is gone. */
823 * Turn off the CS limit completely if exec-shield disabled or
826 if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
827 arch_add_exec_range(current->mm, -1);
830 /* Discard our unneeded old files struct */
832 put_files_struct(files);
836 /* OK, This is the point of no return */
837 current->mm->start_data = 0;
838 current->mm->end_data = 0;
839 current->mm->end_code = 0;
840 current->mm->mmap = NULL;
841 current->flags &= ~PF_FORKNOEXEC;
842 current->mm->def_flags = def_flags;
844 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
845 may depend on the personality. */
846 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
847 if (!(exec_shield & 2) &&
848 elf_read_implies_exec(loc->elf_ex, executable_stack))
849 current->personality |= READ_IMPLIES_EXEC;
851 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
852 current->flags |= PF_RANDOMIZE;
853 arch_pick_mmap_layout(current->mm);
855 /* Do this so that we can load the interpreter, if need be. We will
856 change some of these later */
857 current->mm->free_area_cache = current->mm->mmap_base;
858 current->mm->cached_hole_size = 0;
859 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
862 send_sig(SIGKILL, current, 0);
863 goto out_free_dentry;
866 current->mm->start_stack = bprm->p;
868 /* Now we do a little grungy work by mmaping the ELF image into
869 the correct location in memory.
871 for(i = 0, elf_ppnt = elf_phdata;
872 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
873 int elf_prot = 0, elf_flags;
874 unsigned long k, vaddr;
876 if (elf_ppnt->p_type != PT_LOAD)
879 if (unlikely (elf_brk > elf_bss)) {
882 /* There was a PT_LOAD segment with p_memsz > p_filesz
883 before this one. Map anonymous pages, if needed,
884 and clear the area. */
885 retval = set_brk (elf_bss + load_bias,
886 elf_brk + load_bias);
888 send_sig(SIGKILL, current, 0);
889 goto out_free_dentry;
891 nbyte = ELF_PAGEOFFSET(elf_bss);
893 nbyte = ELF_MIN_ALIGN - nbyte;
894 if (nbyte > elf_brk - elf_bss)
895 nbyte = elf_brk - elf_bss;
896 if (clear_user((void __user *)elf_bss +
899 * This bss-zeroing can fail if the ELF
900 * file specifies odd protections. So
901 * we don't check the return value
907 if (elf_ppnt->p_flags & PF_R)
908 elf_prot |= PROT_READ;
909 if (elf_ppnt->p_flags & PF_W)
910 elf_prot |= PROT_WRITE;
911 if (elf_ppnt->p_flags & PF_X)
912 elf_prot |= PROT_EXEC;
914 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
916 vaddr = elf_ppnt->p_vaddr;
917 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
918 elf_flags |= MAP_FIXED;
919 else if (loc->elf_ex.e_type == ET_DYN)
923 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
926 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
927 elf_prot, elf_flags, 0);
928 if (BAD_ADDR(error)) {
929 send_sig(SIGKILL, current, 0);
930 goto out_free_dentry;
/* First mapped segment fixes load_addr/load_bias for the rest. */
933 if (!load_addr_set) {
935 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
936 if (loc->elf_ex.e_type == ET_DYN) {
938 ELF_PAGESTART(load_bias + vaddr);
939 load_addr += load_bias;
940 reloc_func_desc = load_bias;
943 k = elf_ppnt->p_vaddr;
950 * Check to see if the section's size will overflow the
951 * allowed task size. Note that p_filesz must always be
952 * <= p_memsz so it is only necessary to check p_memsz.
954 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
955 elf_ppnt->p_memsz > TASK_SIZE ||
956 TASK_SIZE - elf_ppnt->p_memsz < k) {
957 /* set_brk can never work. Avoid overflows. */
958 send_sig(SIGKILL, current, 0);
959 goto out_free_dentry;
962 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
966 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
970 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* All segments mapped; rebase every recorded address by load_bias. */
975 loc->elf_ex.e_entry += load_bias;
976 elf_bss += load_bias;
977 elf_brk += load_bias;
978 start_code += load_bias;
979 end_code += load_bias;
980 start_data += load_bias;
981 end_data += load_bias;
983 /* Calling set_brk effectively mmaps the pages that we need
984 * for the bss and break sections. We must do this before
985 * mapping in the interpreter, to make sure it doesn't wind
986 * up getting placed where the bss needs to go.
988 retval = set_brk(elf_bss, elf_brk);
990 send_sig(SIGKILL, current, 0);
991 goto out_free_dentry;
993 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
994 send_sig(SIGSEGV, current, 0);
995 retval = -EFAULT; /* Nobody gets to see this, but.. */
996 goto out_free_dentry;
999 if (elf_interpreter) {
1000 if (interpreter_type == INTERPRETER_AOUT)
1001 elf_entry = load_aout_interp(&loc->interp_ex,
1004 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1008 if (!BAD_ADDR(elf_entry)) {
1009 /* load_elf_interp() returns relocation adjustment */
1010 interp_load_addr = elf_entry;
1011 elf_entry += loc->interp_elf_ex.e_entry;
1014 if (BAD_ADDR(elf_entry)) {
1015 force_sig(SIGSEGV, current);
1016 retval = IS_ERR((void *)elf_entry) ?
1017 (int)elf_entry : -EINVAL;
1018 goto out_free_dentry;
1020 reloc_func_desc = interp_load_addr;
1022 allow_write_access(interpreter);
1024 kfree(elf_interpreter);
1026 elf_entry = loc->elf_ex.e_entry;
1027 if (BAD_ADDR(elf_entry)) {
1028 force_sig(SIGSEGV, current);
1030 goto out_free_dentry;
1034 if (interpreter_type != INTERPRETER_AOUT)
1035 sys_close(elf_exec_fileno);
1037 set_binfmt(&elf_format);
1039 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1040 retval = arch_setup_additional_pages(bprm, executable_stack,
1041 start_code, interp_map_addr);
1043 send_sig(SIGKILL, current, 0);
1046 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1050 compute_creds(bprm);
1051 current->flags &= ~PF_FORKNOEXEC;
1052 create_elf_tables(bprm, &loc->elf_ex,
1053 (interpreter_type == INTERPRETER_AOUT),
1054 load_addr, interp_load_addr);
1055 /* N.B. passed_fileno might not be initialized? */
1056 if (interpreter_type == INTERPRETER_AOUT)
1057 current->mm->arg_start += strlen(passed_fileno) + 1;
1058 current->mm->end_code = end_code;
1059 current->mm->start_code = start_code;
1060 current->mm->start_data = start_data;
1061 current->mm->end_data = end_data;
1062 current->mm->start_stack = bprm->p;
1064 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
1065 if (current->flags & PF_RANDOMIZE)
1066 randomize_brk(elf_brk);
1068 if (current->personality & MMAP_PAGE_ZERO) {
1069 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1070 and some applications "depend" upon this behavior.
1071 Since we do not have the power to recompile these, we
1072 emulate the SVr4 behavior. Sigh. */
1073 down_write(¤t->mm->mmap_sem);
1074 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1075 MAP_FIXED | MAP_PRIVATE, 0);
1076 up_write(¤t->mm->mmap_sem);
1079 #ifdef ELF_PLAT_INIT
1081 * The ABI may specify that certain registers be set up in special
1082 * ways (on i386 %edx is the address of a DT_FINI function, for
1083 * example. In addition, it may also specify (eg, PowerPC64 ELF)
1084 * that the e_entry field is the address of the function descriptor
1085 * for the startup routine, rather than the address of the startup
1086 * routine itself. This macro performs whatever initialization to
1087 * the regs structure is required as well as any relocations to the
1088 * function descriptor entries when executing dynamically links apps.
1090 ELF_PLAT_INIT(regs, reloc_func_desc);
1093 start_thread(regs, elf_entry, bprm->p);
/* Error unwind paths (labels live in lines elided by the extraction). */
1102 allow_write_access(interpreter);
1106 kfree(elf_interpreter);
1108 sys_close(elf_exec_fileno);
1111 put_files_struct(current->files);
1112 current->files = files;
1119 /* This is really simpleminded and specialized - we are loading an
1120 a.out library that is given an ELF header. */
1121 static int load_elf_library(struct file *file)
1123 struct elf_phdr *elf_phdata;
1124 struct elf_phdr *eppnt;
1125 unsigned long elf_bss, bss, len;
1126 int retval, error, i, j;
1127 struct elfhdr elf_ex;
1130 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1131 if (retval != sizeof(elf_ex))
1134 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1137 /* First of all, some simple consistency checks */
1138 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1139 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1142 /* Now read in all of the header information */
1144 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1145 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1148 elf_phdata = kmalloc(j, GFP_KERNEL);
1154 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1158 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1159 if ((eppnt + i)->p_type == PT_LOAD)
1164 while (eppnt->p_type != PT_LOAD)
1167 /* Now use mmap to map the library into memory. */
1168 down_write(¤t->mm->mmap_sem);
1169 error = do_mmap(file,
1170 ELF_PAGESTART(eppnt->p_vaddr),
1172 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1173 PROT_READ | PROT_WRITE | PROT_EXEC,
1174 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1176 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1177 up_write(¤t->mm->mmap_sem);
1178 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1181 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1182 if (padzero(elf_bss)) {
1187 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1189 bss = eppnt->p_memsz + eppnt->p_vaddr;
1191 down_write(¤t->mm->mmap_sem);
1192 do_brk(len, bss - len);
1193 up_write(¤t->mm->mmap_sem);
1204 * Note that some platforms still use traditional core dumps and not
1205 * the ELF core dump. Each platform can select it as appropriate.
1207 #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1212 * Modelled on fs/exec.c:aout_core_dump()
1213 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1216 * These are the only things you should do on a core-file: use only these
1217 * functions to write out all the necessary info.
1219 static int dump_write(struct file *file, const void *addr, int nr)
1221 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1224 static int dump_seek(struct file *file, loff_t off)
1226 if (file->f_op->llseek) {
1227 if (file->f_op->llseek(file, off, 0) != off)
1235 * Decide whether a segment is worth dumping; default is yes to be
1236 * sure (missing info is worse than too much; etc).
1237 * Personally I'd include everything, and use the coredump limit...
1239 * I think we should skip something. But I am not sure how. H.J.
1241 static int maydump(struct vm_area_struct *vma)
1243 /* Do not dump I/O mapped devices or special mappings */
1244 if (vma->vm_flags & (VM_IO | VM_RESERVED))
1247 if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO. */
1250 /* Dump shared memory only if mapped from an anonymous file. */
1251 if (vma->vm_flags & VM_SHARED)
1252 return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
1254 /* If it hasn't been written to, don't write it out */
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner, e.g. "CORE" */
	int type;		/* NT_* note type */
	unsigned int datasz;	/* size of payload in bytes */
	void *data;		/* payload */
};
1270 static int notesize(struct memelfnote *en)
1274 sz = sizeof(struct elf_note);
1275 sz += roundup(strlen(en->name) + 1, 4);
1276 sz += roundup(en->datasz, 4);
1281 #define DUMP_WRITE(addr, nr) \
1282 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1283 #define DUMP_SEEK(off) \
1284 do { if (!dump_seek(file, (off))) return 0; } while(0)
1286 static int writenote(struct memelfnote *men, struct file *file)
1290 en.n_namesz = strlen(men->name) + 1;
1291 en.n_descsz = men->datasz;
1292 en.n_type = men->type;
1294 DUMP_WRITE(&en, sizeof(en));
1295 DUMP_WRITE(men->name, en.n_namesz);
1296 /* XXX - cast from long long to long to avoid need for libgcc.a */
1297 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1298 DUMP_WRITE(men->data, men->datasz);
1299 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1306 #define DUMP_WRITE(addr, nr) \
1307 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1309 #define DUMP_SEEK(off) \
1310 if (!dump_seek(file, (off))) \
/*
 * fill_elf_header - initialize the ELF file header for an ET_CORE
 * dump: magic + identity bytes, then @segs program headers located
 * immediately after the header. No section headers are emitted
 * (e_shentsize/e_shstrndx zeroed).
 */
1313 static void fill_elf_header(struct elfhdr *elf, int segs)
1315 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1316 elf->e_ident[EI_CLASS] = ELF_CLASS;
1317 elf->e_ident[EI_DATA] = ELF_DATA;
1318 elf->e_ident[EI_VERSION] = EV_CURRENT;
1319 elf->e_ident[EI_OSABI] = ELF_OSABI;
1320 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1322 elf->e_type = ET_CORE;
1323 elf->e_machine = ELF_ARCH;
1324 elf->e_version = EV_CURRENT;
1326 elf->e_phoff = sizeof(struct elfhdr);
1328 elf->e_flags = ELF_CORE_EFLAGS;
1329 elf->e_ehsize = sizeof(struct elfhdr);
1330 elf->e_phentsize = sizeof(struct elf_phdr);
1331 elf->e_phnum = segs;
1332 elf->e_shentsize = 0;
1334 elf->e_shstrndx = 0;
/*
 * fill_elf_note_phdr - set up the PT_NOTE program header covering the
 * notes blob of @sz bytes at file offset @offset.
 * NOTE(review): the zeroing of p_vaddr/p_paddr/p_memsz/p_flags/p_align
 * is in lines elided from this extract.
 */
1338 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1340 phdr->p_type = PT_NOTE;
1341 phdr->p_offset = offset;
1344 phdr->p_filesz = sz;
/*
 * fill_note - populate a memelfnote with the given owner @name,
 * note @type, and @sz bytes of descriptor @data.
 * NOTE(review): the body is entirely elided from this extract.
 */
1351 static void fill_note(struct memelfnote *note, const char *name, int type,
1352 unsigned int sz, void *data)
1362 * fill up all the fields in prstatus from the given task struct, except
1363 * registers which need to be filled up separately.
1365 static void fill_prstatus(struct elf_prstatus *prstatus,
1366 struct task_struct *p, long signr)
/* Signal that caused the dump, plus pending/blocked masks and ids. */
1368 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1369 prstatus->pr_sigpend = p->pending.signal.sig[0];
1370 prstatus->pr_sighold = p->blocked.sig[0];
1371 prstatus->pr_pid = p->pid;
1372 prstatus->pr_ppid = p->parent->pid;
1373 prstatus->pr_pgrp = process_group(p);
1374 prstatus->pr_sid = p->signal->session;
1375 if (thread_group_leader(p)) {
1377 * This is the record for the group leader. Add in the
1378 * cumulative times of previous dead threads. This total
1379 * won't include the time of each live thread whose state
1380 * is included in the core dump. The final total reported
1381 * to our parent process when it calls wait4 will include
1382 * those sums as well as the little bit more time it takes
1383 * this and each other thread to finish dying after the
1384 * core dump synchronization phase.
1386 cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
1387 &prstatus->pr_utime);
1388 cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
1389 &prstatus->pr_stime);
/* Non-leader threads report only their own CPU times. */
1391 cputime_to_timeval(p->utime, &prstatus->pr_utime);
1392 cputime_to_timeval(p->stime, &prstatus->pr_stime);
/* Times of reaped children, always taken from the signal struct. */
1394 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1395 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
/*
 * fill_psinfo - build the NT_PRPSINFO note payload: command line
 * (copied from user space), ids, scheduling state and name of @p.
 */
1398 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1399 struct mm_struct *mm)
1401 unsigned int i, len;
1403 /* first copy the parameters from user space */
1404 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
/* Clamp the argv blob to the fixed pr_psargs buffer (NUL reserved). */
1406 len = mm->arg_end - mm->arg_start;
1407 if (len >= ELF_PRARGSZ)
1408 len = ELF_PRARGSZ-1;
1409 if (copy_from_user(&psinfo->pr_psargs,
1410 (const char __user *)mm->arg_start, len))
/* argv strings are NUL-separated in memory; join them with spaces. */
1412 for(i = 0; i < len; i++)
1413 if (psinfo->pr_psargs[i] == 0)
1414 psinfo->pr_psargs[i] = ' ';
1415 psinfo->pr_psargs[len] = 0;
1417 psinfo->pr_pid = p->pid;
1418 psinfo->pr_ppid = p->parent->pid;
1419 psinfo->pr_pgrp = process_group(p);
1420 psinfo->pr_sid = p->signal->session;
/* Map task state bit to the classic one-letter ps state code. */
1422 i = p->state ? ffz(~p->state) + 1 : 0;
1423 psinfo->pr_state = i;
1424 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1425 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1426 psinfo->pr_nice = task_nice(p);
1427 psinfo->pr_flag = p->flags;
1428 SET_UID(psinfo->pr_uid, p->uid);
1429 SET_GID(psinfo->pr_gid, p->gid);
/* p->comm is NUL-terminated within TASK_COMM_LEN, so this copy is safe. */
1430 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1435 /* Here is the structure in which status of each thread is captured. */
/*
 * One entry per non-dumping thread sharing the mm; linked into the
 * thread_list built by elf_core_dump(). notes[] holds up to three
 * per-thread notes (PRSTATUS, PRFPREG, optionally PRXFPREG).
 */
1436 struct elf_thread_status
1438 struct list_head list;
1439 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1440 elf_fpregset_t fpu; /* NT_PRFPREG */
1441 struct task_struct *thread;
1442 #ifdef ELF_CORE_COPY_XFPREGS
1443 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1445 struct memelfnote notes[3];
1450 * In order to add the specific thread information for the elf file format,
1451 * we need to keep a linked list of every threads pr_status and then create
1452 * a single section for them in the final core file.
/*
 * Fills @t's notes from its task and returns the total byte size those
 * notes will occupy in the core file (accumulated via notesize()).
 * FPU and extended-FPU notes are only added when the task has state
 * worth copying.
 */
1454 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1457 struct task_struct *p = t->thread;
1460 fill_prstatus(&t->prstatus, p, signr);
1461 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1463 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1466 sz += notesize(&t->notes[0]);
1468 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1470 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1473 sz += notesize(&t->notes[1]);
1476 #ifdef ELF_CORE_COPY_XFPREGS
1477 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1478 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
1481 sz += notesize(&t->notes[2]);
1490 * This is a two-pass process; first we find the offsets of the bits,
1491 * and then they are actually written out. If we run out of core limit
/*
 * elf_core_dump - write an ET_CORE ELF file for the current process.
 * Layout: ELF header, program headers (one PT_NOTE + one PT_LOAD per
 * vma, plus arch extras), the notes blob, then page-aligned segment
 * data. `limit` is RLIMIT_CORE; the DUMP_WRITE macro above enforces it.
 * Returns nonzero on success (the elided tail sets `has_dumped`).
 */
1494 static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
1502 struct vm_area_struct *vma;
1503 struct elfhdr *elf = NULL;
1504 off_t offset = 0, dataoff;
1505 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1507 struct memelfnote *notes = NULL;
1508 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1509 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1510 struct task_struct *g, *p;
1511 LIST_HEAD(thread_list);
1512 struct list_head *t;
1513 elf_fpregset_t *fpu = NULL;
1514 #ifdef ELF_CORE_COPY_XFPREGS
1515 elf_fpxregset_t *xfpu = NULL;
1517 int thread_status_size = 0;
1521 * We no longer stop all VM operations.
1523 * This is because those proceses that could possibly change map_count
1524 * or the mmap / vma pages are now blocked in do_exit on current
1525 * finishing this core dump.
1527 * Only ptrace can touch these memory addresses, but it doesn't change
1528 * the map_count or the pages allocated. So no possibility of crashing
1529 * exists while dumping the mm->vm_next areas to the core file.
1532 /* alloc memory for large data structures: too large to be on stack */
/* NOTE(review): the NULL-check/goto-cleanup lines after each kmalloc
 * are elided from this extract. */
1533 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1536 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1539 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1542 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1545 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1548 #ifdef ELF_CORE_COPY_XFPREGS
1549 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
/* Collect every other thread sharing our mm; GFP_ATOMIC because the
 * tasklist_lock is held while walking the thread list. */
1555 struct elf_thread_status *tmp;
1556 read_lock(&tasklist_lock);
1558 if (current->mm == p->mm && current != p) {
1559 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
1561 read_unlock(&tasklist_lock);
1564 INIT_LIST_HEAD(&tmp->list);
1566 list_add(&tmp->list, &thread_list);
1568 while_each_thread(g,p);
1569 read_unlock(&tasklist_lock);
/* Pass over the gathered threads: fill their notes and total their size. */
1570 list_for_each(t, &thread_list) {
1571 struct elf_thread_status *tmp;
1574 tmp = list_entry(t, struct elf_thread_status, list);
1575 sz = elf_dump_thread_status(signr, tmp);
1576 thread_status_size += sz;
1579 /* now collect the dump for the current */
1580 memset(prstatus, 0, sizeof(*prstatus));
1581 fill_prstatus(prstatus, current, signr);
1582 elf_core_copy_regs(&prstatus->pr_reg, regs);
1584 segs = current->mm->map_count;
1585 #ifdef ELF_CORE_EXTRA_PHDRS
1586 segs += ELF_CORE_EXTRA_PHDRS;
1590 fill_elf_header(elf, segs + 1); /* including notes section */
1593 current->flags |= PF_DUMPCORE;
1596 * Set up the notes in similar form to SVR4 core dumps made
1597 * with info from their /proc.
1600 fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1601 fill_psinfo(psinfo, current->group_leader, current->mm);
1602 fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
/* Auxiliary vector: scan saved_auxv until the AT_NULL terminator. */
1606 auxv = (elf_addr_t *)current->mm->saved_auxv;
1611 while (auxv[i - 2] != AT_NULL);
1612 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1613 i * sizeof(elf_addr_t), auxv);
1615 /* Try to dump the FPU. */
1616 if ((prstatus->pr_fpvalid =
1617 elf_core_copy_task_fpregs(current, regs, fpu)))
1618 fill_note(notes + numnote++,
1619 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1620 #ifdef ELF_CORE_COPY_XFPREGS
1621 if (elf_core_copy_task_xfpregs(current, xfpu))
1622 fill_note(notes + numnote++,
1623 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
/* Pass 1: compute file offsets while writing headers. */
1629 DUMP_WRITE(elf, sizeof(*elf));
1630 offset += sizeof(*elf); /* Elf header */
1631 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1633 /* Write notes phdr entry */
1635 struct elf_phdr phdr;
1638 for (i = 0; i < numnote; i++)
1639 sz += notesize(notes + i);
1641 sz += thread_status_size;
1643 fill_elf_note_phdr(&phdr, sz, offset);
1645 DUMP_WRITE(&phdr, sizeof(phdr));
1648 /* Page-align dumped data */
1649 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1651 /* Write program headers for segments dump */
1652 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1653 struct elf_phdr phdr;
1656 sz = vma->vm_end - vma->vm_start;
1658 phdr.p_type = PT_LOAD;
1659 phdr.p_offset = offset;
1660 phdr.p_vaddr = vma->vm_start;
/* Skipped vmas still get a phdr, just with no file data. */
1662 phdr.p_filesz = maydump(vma) ? sz : 0;
1664 offset += phdr.p_filesz;
1665 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1666 if (vma->vm_flags & VM_WRITE)
1667 phdr.p_flags |= PF_W;
1668 if (vma->vm_flags & VM_EXEC)
1669 phdr.p_flags |= PF_X;
1670 phdr.p_align = ELF_EXEC_PAGESIZE;
1672 DUMP_WRITE(&phdr, sizeof(phdr));
1675 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1676 ELF_CORE_WRITE_EXTRA_PHDRS;
1679 /* write out the notes section */
1680 for (i = 0; i < numnote; i++)
1681 if (!writenote(notes + i, file))
1684 /* write out the thread status notes section */
1685 list_for_each(t, &thread_list) {
1686 struct elf_thread_status *tmp =
1687 list_entry(t, struct elf_thread_status, list);
1689 for (i = 0; i < tmp->num_notes; i++)
1690 if (!writenote(&tmp->notes[i], file))
/* Pass 2: dump segment contents, one page at a time. Pages we cannot
 * get (or the shared zero page) become holes via DUMP_SEEK. */
1696 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1702 for (addr = vma->vm_start;
1704 addr += PAGE_SIZE) {
1706 struct vm_area_struct *vma;
1708 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1709 &page, &vma) <= 0) {
1710 DUMP_SEEK(file->f_pos + PAGE_SIZE);
1712 if (page == ZERO_PAGE(addr)) {
1713 DUMP_SEEK(file->f_pos + PAGE_SIZE);
1716 flush_cache_page(vma, addr,
/* Enforce RLIMIT_CORE on data pages too. */
1719 if ((size += PAGE_SIZE) > limit ||
1720 !dump_write(file, kaddr,
1723 page_cache_release(page);
1728 page_cache_release(page);
1733 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1734 ELF_CORE_WRITE_EXTRA_DATA;
/* Sanity check: final position must match the precomputed layout. */
1737 if ((off_t)file->f_pos != offset) {
1740 "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1741 (off_t)file->f_pos, offset);
/* Cleanup: free the per-thread status list and the kmalloc'd buffers
 * (buffer kfrees are in lines elided from this extract). */
1748 while (!list_empty(&thread_list)) {
1749 struct list_head *tmp = thread_list.next;
1751 kfree(list_entry(tmp, struct elf_thread_status, list));
1759 #ifdef ELF_CORE_COPY_XFPREGS
1766 #endif /* USE_ELF_CORE_DUMP */
/* Register the ELF loader with the binfmt core at boot. */
1768 static int __init init_elf_binfmt(void)
1770 return register_binfmt(&elf_format);
/* Unregister the ELF loader on module unload. */
1773 static void __exit exit_elf_binfmt(void)
1775 /* Remove the COFF and ELF loaders. */
1776 unregister_binfmt(&elf_format);
/* core_initcall: ELF support must be up before other initcalls exec
 * userspace helpers. */
1779 core_initcall(init_elf_binfmt);
1780 module_exit(exit_elf_binfmt);
1781 MODULE_LICENSE("GPL");