/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 *  Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
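/*
 * Worked example, assuming ELF_MIN_ALIGN == 0x1000: for _v == 0x08048123,
 * ELF_PAGESTART(_v)  == 0x08048000  (round down to an ELF page),
 * ELF_PAGEOFFSET(_v) == 0x00000123  (offset within that page),
 * ELF_PAGEALIGN(_v)  == 0x08049000  (round up to the next page).
 */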
static struct linux_binfmt elf_format = {
		.module		= THIS_MODULE,
		.load_binary	= load_elf_binary,
		.load_shlib	= load_elf_library,
		.core_dump	= elf_core_dump,
		.min_coredump	= ELF_EXEC_PAGESIZE
};
#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
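/*
 * Note that BAD_ADDR() also catches the -errno values that do_mmap() and
 * do_brk() return on failure, since e.g. (unsigned long)-ENOMEM is far
 * above TASK_SIZE; that is how errors propagate out of elf_map() below.
 */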
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
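/*
 * Illustration, assuming ELF_MIN_ALIGN == 0x1000: for elf_bss == 0x0804a123,
 * padzero() clears the 0xedd bytes from 0x0804a123 up to 0x0804b000, so the
 * tail of the final file-backed page reads as zeros instead of whatever the
 * file happened to contain there.
 */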
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
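/*
 * Illustration for the common grows-down case, with hypothetical values:
 * STACK_ALLOC(sp, 13) with sp == 0xbffffff0 leaves sp == 0xbfffffe3 and
 * yields that address; STACK_ROUND() then drops the result back onto a
 * 16-byte boundary, which is what most ABIs expect of the initial stack.
 */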
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
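/*
 * The initial stack this builds (grows-down case) looks roughly like this,
 * reading upward from the final stack pointer:
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[envc-1], NULL
 *	auxv: (AT_*, value) pairs, terminated by an AT_NULL pair
 *	argument/environment strings and the platform string
 */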
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type,
			unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	down_write(&current->mm->mmap_sem);

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);

	return map_addr;
}

#endif /* !elf_map */
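/*
 * The #ifndef guard above means this generic elf_map() is compiled only
 * when the architecture has not already supplied its own definition in
 * its headers.
 */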
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}

	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
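/*
 * Example: two PT_LOAD headers with p_vaddr/p_memsz of 0x0000/0x4000 and
 * 0x6000/0x0800 give 0x6000 + 0x800 - 0x0 == 0x6800 bytes - the span from
 * the (page-rounded) start of the first segment to the end of the last,
 * holes included.
 */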
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr,
				     unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size)
		goto out_close;

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
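/*
 * Note the return convention above: on success this is the interpreter's
 * entry point (e_entry biased by load_addr); on failure it is an -errno
 * value or ~0UL, either of which fails the caller's BAD_ADDR() test.
 */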
static unsigned long load_aout_interp(struct exec * interp_ex,
				      struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
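/*
 * interpreter_type in load_elf_binary() starts out as the OR of both
 * candidates and is narrowed by the magic-number checks there; a.out and
 * ELF are the only interpreter formats this loader knows how to map.
 */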
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if (current->flags & PF_RANDOMIZE)
		random_variable = get_random_int() % (8*1024*1024);
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top + random_variable);
#else
	return PAGE_ALIGN(stack_top - random_variable);
#endif
}
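/*
 * With PF_RANDOMIZE set this shifts the stack top by up to 8 MiB; e.g.
 * (hypothetically) a nominal stack top of 0xc0000000 becomes some
 * page-aligned value no more than 8 MiB below it.
 */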
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}
	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	executable_stack = EXSTACK_DEFAULT;

	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);

	if (current->personality == PER_LINUX && exec_shield == 2) {
		executable_stack = EXSTACK_DISABLE_X;
		current->flags |= PF_RANDOMIZE;
	}
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

#ifdef __i386__
	/*
	 * Turn off the CS limit completely if exec-shield disabled or
	 * NX active:
	 */
	if (!exec_shield || executable_stack != EXSTACK_DISABLE_X || nx_enabled)
		arch_add_exec_range(current->mm, -1);
#endif
	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (exec_shield != 2 &&
			elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	set_mm_counter(current->mm, rss, 0);
	current->mm->free_area_cache = current->mm->mmap_base;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.
	 */
	for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set)
			elf_flags |= MAP_FIXED;
		else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			   default mmap base, as well as whatever program they
			   might try to exec.  This is because the brk will
			   follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (padzero(elf_bss)) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr,
						    load_bias);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter %.128s\n",
				elf_interpreter);
			force_sig(SIGSEGV, current);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
	}
	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);
	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef __HAVE_ARCH_RANDOMIZE_BRK
	if (current->flags & PF_RANDOMIZE)
		randomize_brk(elf_brk);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;
	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	if (vma->vm_flags & VM_DONTEXPAND) /* Kludge for vDSO.  */
		return 1;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
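/*
 * In practice this skips, for example, a memory-mapped framebuffer
 * (VM_IO) and file-backed text that was never written to (no anon_vma),
 * while keeping the vDSO and any private pages the process has dirtied.
 */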
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
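/*
 * Example: a note named "CORE" with a 12-byte payload occupies
 * sizeof(struct elf_note) (three 32-bit words, 12 bytes) +
 * roundup(5, 4) + roundup(12, 4) == 12 + 8 + 12 == 32 bytes, since the
 * name and payload are each padded to a 4-byte boundary.
 */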
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
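/*
 * The resulting layout of the core file is:
 *
 *	ELF header (ET_CORE)
 *	(segs+1) program headers: one PT_NOTE, then one PT_LOAD per vma
 *	the note data itself
 *	page-aligned memory contents of each dumped vma, in phdr order
 */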
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count or
	 * the mmap / vma pages are now blocked in do_exit on current finishing
	 * this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);
	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif
	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr, page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}
#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");