 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/vs_memory.h>

#include <asm/uaccess.h>
#include <asm/param.h>

#include <linux/elf.h>

static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#define elf_addr_t unsigned long

 * If we don't support core dumping, then supply a NULL so we
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#define elf_core_dump NULL
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
# define ELF_MIN_ALIGN PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
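
/*
 * Illustrative example (assuming ELF_MIN_ALIGN == 4096, i.e. 0x1000):
 *   ELF_PAGESTART(0x1234)  == 0x1000  (round down to the page boundary)
 *   ELF_PAGEOFFSET(0x1234) == 0x234   (offset within the page)
 *   ELF_PAGEALIGN(0x1234)  == 0x2000  (round up to the next boundary)
 */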

static struct linux_binfmt elf_format = {
	.module = THIS_MODULE,
	.load_binary = load_elf_binary,
	.load_shlib = load_elf_library,
	.core_dump = elf_core_dump,
	.min_coredump = ELF_EXEC_PAGESIZE

#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
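
/*
 * Note (illustrative): because the comparison is done on an unsigned value,
 * BAD_ADDR() catches both user addresses beyond TASK_SIZE and negative error
 * codes returned by do_mmap() (e.g. -ENOMEM cast to unsigned long), which is
 * how the callers of elf_map() below test for failure.
 */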

static int set_brk(unsigned long start, unsigned long end)
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
		unsigned long addr = do_brk(start, end - start);
	current->mm->start_brk = current->mm->brk = end;

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss). This would
   contain the junk from the file that should not
static void padzero(unsigned long elf_bss)
	nbyte = ELF_PAGEOFFSET(elf_bss);
		nbyte = ELF_MIN_ALIGN - nbyte;
		clear_user((void __user *) elf_bss, nbyte);

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
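
/*
 * Illustrative example for the downward-growing case: with sp == 0x1000 and
 * len == 8, STACK_ALLOC() lowers sp to 0xff8 and yields that address as the
 * start of the new region, while STACK_ROUND() masks the final value down to
 * a 16-byte boundary.
 */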

create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	elf_addr_t *elf_info;
	struct task_struct *tsk = current;

	 * If this architecture has a platform capability string, copy it
	 * to userspace. In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
		size_t len = strlen(k_platform) + 1;
#ifdef __HAVE_ARCH_ALIGN_STACK
		p = (unsigned long)arch_align_stack((unsigned long)p);
		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		__copy_to_user(u_platform, k_platform, len);
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
	/* And advance past the AT_NULL entry. */
	sp = STACK_ADD(p, ei_index);
	items = (argc + 1) + (envc + 1);
		items += 3; /* a.out interpreters require argv & envp too */
		items += 1; /* ELF interpreters only put argc on the stack */
	bprm->p = STACK_ROUND(sp, items);
	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
	sp = (elf_addr_t __user *)bprm->p;
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
		envp = argv + argc + 1;
	/* Populate argv and envp */
	p = current->mm->arg_start;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
	current->mm->arg_end = current->mm->env_start = p;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
	current->mm->env_end = p;
	/* Put the elf_info on the stack in the right place. */
	sp = (elf_addr_t __user *)envp + 1;
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
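
/*
 * Sketch of the resulting stack layout (illustrative, lowest address first):
 *
 *	argc
 *	argv[0] .. argv[argc - 1], NULL
 *	envp[0] .. envp[envc - 1], NULL
 *	auxv id/value pairs, terminated by AT_NULL
 *	argument and environment strings, ELF_PLATFORM string
 *
 * The exact rounding and padding come from the STACK_* macros above.
 */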

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	down_write(&current->mm->mmap_sem);
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
		map_addr = do_mmap(filep, addr, size, prot, type, off);
	up_write(&current->mm->mmap_sem);
#endif /* !elf_map */
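
/*
 * Illustrative example: if the interpreter spans total_size == 3 pages but
 * this particular program header only covers size == 1 page, the branch
 * above first maps all 3 pages at addr so that the randomized placement
 * reserves the whole range, then unmaps the trailing 2 pages; subsequent
 * segments are mapped MAP_FIXED into the hole that is left behind.
 */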

static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
	       ELF_PAGESTART(cmds[first_idx].p_vaddr);
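
/*
 * Illustrative example: with ELF_MIN_ALIGN == 0x1000, a first PT_LOAD at
 * p_vaddr 0x1234 and a last PT_LOAD ending at p_vaddr + p_memsz == 0x8000,
 * total_mapping_size() returns 0x8000 - ELF_PAGESTART(0x1234) == 0x7000.
 */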

/* This is much more generalized than the library routine read function,
   so we keep this separate. Technically the library read function
   is only provided so that we can read a.out libraries that have

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
		struct file * interpreter,
		unsigned long *interp_load_addr,
		unsigned long no_base)
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
	if (!elf_check_arch(interp_elf_ex))
	if (!interpreter->f_op || !interpreter->f_op->mmap)

	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
			if (BAD_ADDR(map_addr))

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;

			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;

	 * Now fill out the bss section. First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		error = do_brk(elf_bss, last_bss - elf_bss);

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
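
/*
 * Illustrative example: for an ET_DYN interpreter linked at p_vaddr 0 whose
 * first segment is mapped at 0x40000000, load_addr becomes 0x40000000 and
 * the value returned above is e_entry + 0x40000000, i.e. the interpreter's
 * relocated entry point.
 */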

static unsigned long load_aout_interp(struct exec * interp_ex,
		struct file * interpreter)
	unsigned long text_data, elf_entry = ~0UL;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
		addr = (char __user *)0;
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);

	do_brk(0, text_data);
	if (!interpreter->f_op || !interpreter->f_op->read)
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
	elf_entry = interp_ex->a_entry;

 * These are the functions used to load ELF style executables and shared
 * libraries. There is no binary dependent code anywhere else.

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
	unsigned long def_flags = 0;
	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
	if (!elf_check_arch(&elf_ex))
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)

	/* Now read in all of the header information */
	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
	if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (files == current->files) {
		put_files_struct(files);

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			if (elf_ppnt->p_filesz > PATH_MAX)
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
			if (!elf_interpreter)
			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image. SET_PERSONALITY can select an
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread(). - akpm
			SET_PERSONALITY(elf_ex, ibcs2_interpreter);
			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);

	elf_ppnt = elf_phdata;
	executable_stack = EXSTACK_DEFAULT;
	for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
				executable_stack = EXSTACK_DISABLE_X;
	have_pt_gnu_stack = (i < elf_ex.e_phnum);
	if (current->personality == PER_LINUX)
	switch (exec_shield) {
		if (executable_stack != EXSTACK_DEFAULT) {
			current->flags |= PF_RELOCEXEC;
			relocexec = PF_RELOCEXEC;
		executable_stack = EXSTACK_DISABLE_X;
		current->flags |= PF_RELOCEXEC;
		relocexec = PF_RELOCEXEC;
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;
		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&interp_elf_ex))
			goto out_free_dentry;
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
				goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
		goto out_free_dentry;
	current->flags |= relocexec;

	 * Turn off the CS limit completely if exec-shield disabled or
		arch_add_exec_range(current->mm, -1);

	/* Discard our unneeded old files struct */
		put_files_struct(files);
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality. */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(elf_ex, have_pt_gnu_stack))
		current->personality |= READ_IMPLIES_EXEC;

	/* Do this so that we can load the interpreter, if need be. We will
	   change some of these later */
	// current->mm->rss = 0;
	vx_rsspages_sub(current->mm, current->mm->rss);
	current->mm->free_area_cache = current->mm->mmap_base;
	retval = setup_arg_pages(bprm, executable_stack);
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.
	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)

		if (unlikely (elf_brk > elf_bss)) {
			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area. */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void __user *) elf_bss + load_bias, nbyte);
		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set)
			elf_flags |= MAP_FIXED;
		else if (elf_ex.e_type == ET_DYN)
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);

		if (!load_addr_set) {
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;

	elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
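
	/*
	 * Illustrative example: for an ET_DYN (PIE-style) binary whose first
	 * PT_LOAD has p_vaddr 0, load_bias starts at
	 * ELF_PAGESTART(ELF_ET_DYN_BASE) and is adjusted to wherever the
	 * first mapping actually landed; the adjustments above then shift
	 * every link-time address (entry point, bss/brk, code and data
	 * bounds) by that same bias.
	 */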
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections. We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	retval = set_brk(elf_bss, elf_brk);
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
			elf_entry = load_elf_interp(&interp_elf_ex,
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		kfree(elf_interpreter);
		elf_entry = elf_ex.e_entry;

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);
	set_binfmt(&elf_format);

	 * Map the vsyscall trampoline. This address is then passed via
#ifdef __HAVE_ARCH_VSYSCALL

	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef __HAVE_ARCH_RANDOMIZE_BRK
	if (current->flags & PF_RELOCEXEC)
		randomize_brk(elf_brk);
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself. This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	ELF_PLAT_INIT(regs, reloc_func_desc);

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
			send_sig(SIGTRAP, current, 0);

	allow_write_access(interpreter);
	kfree(elf_interpreter);
	sys_close(elf_exec_fileno);
	put_files_struct(current->files);
	current->files = files;
	current->flags &= ~PF_RELOCEXEC;
	current->flags |= old_relocexec;

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
	struct elf_phdr *elf_phdata;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)

	/* Now read in all of the header information */
	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
	elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD) j++;
	while (elf_phdata->p_type != PT_LOAD) elf_phdata++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(elf_phdata->p_vaddr),
			(elf_phdata->p_filesz +
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(elf_phdata->p_offset -
			 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(elf_phdata->p_vaddr))

	elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
	len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
		do_brk(len, bss - len);

 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump. Each platform can select it as appropriate.

#ifdef USE_ELF_CORE_DUMP

 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>

 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.

static int dump_write(struct file *file, const void *addr, int nr)
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;

static int dump_seek(struct file *file, off_t off)
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)

 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 * I think we should skip something. But I am not sure how. H.J.

static int maydump(struct vm_area_struct *vma)
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	if (!(vma->vm_flags & VM_READ))

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))

#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
	unsigned int datasz;

static int notesize(struct memelfnote *en)
	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);
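
/*
 * Illustrative example: for a note named "CORE" whose descriptor is, say,
 * 144 bytes, notesize() gives sizeof(struct elf_note) + roundup(5, 4) +
 * roundup(144, 4) == 12 + 8 + 144 bytes, since both the name (including its
 * terminating NUL) and the descriptor are padded to 4-byte multiples.
 */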

#define DUMP_WRITE(addr, nr) \
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off) \
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
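
/*
 * Sketch of the on-disk layout writenote() produces: an elf_note header,
 * the NUL-terminated name padded to a 4-byte boundary, then the descriptor
 * data, again padded to 4 bytes, which is exactly the size that notesize()
 * accounts for.
 */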

#define DUMP_WRITE(addr, nr) \
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
#define DUMP_SEEK(off) \
	if (!dump_seek(file, (off))) \

static inline void fill_elf_header(struct elfhdr *elf, int segs)
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shstrndx = 0;
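
/*
 * Sketch of the core file assembled below (illustrative): the ELF header
 * filled in above, then segs+1 program headers (a PT_NOTE entry followed by
 * PT_LOAD entries for the vmas), then the note data, and finally the dumped
 * memory contents aligned to ELF_EXEC_PAGESIZE.
 */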

static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_filesz = sz;

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)

 * fill up all the fields in prstatus from the given task struct, except registers
 * which need to be filled up separately.

static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	jiffies_to_timeval(p->utime, &prstatus->pr_utime);
	jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);

static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		struct mm_struct *mm)
	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	copy_from_user(&psinfo->pr_psargs,
		       (const char __user *)mm->arg_start, len);
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
	struct list_head list;
	struct elf_prstatus prstatus; /* NT_PRSTATUS */
	elf_fpregset_t fpu; /* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu; /* NT_PRXFPREG */
	struct memelfnote notes[3];

 * In order to add the specific thread information for the ELF file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
	struct elf_thread_status *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->list);

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		sz += notesize(&t->notes[1]);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		sz += notesize(&t->notes[2]);
	list_add(&t->list, thread_list);

 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit

static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
	int thread_status_size = 0;
	 * We no longer stop all VM operations.
	 * This is because those processes that could possibly change map_count or
	 * the mmap / vma pages are now blocked in do_exit on current finishing
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);

	/* capture the status of all other threads */
		read_lock(&tasklist_lock);
			if (current->mm == p->mm && current != p) {
				int sz = elf_dump_thread_status(signr, p, &thread_list);
					read_unlock(&tasklist_lock);
					thread_status_size += sz;
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);

	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;

	fill_elf_header(elf, segs+1); /* including notes section */
	current->flags |= PF_DUMPCORE;

	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	auxv = (elf_addr_t *) current->mm->saved_auxv;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf); /* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */

	/* Write notes phdr entry */
		struct elf_phdr phdr;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);
		sz += thread_status_size;
		fill_elf_note_phdr(&phdr, sz, offset);
		DUMP_WRITE(&phdr, sizeof(phdr));

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;

		sz = vma->vm_end - vma->vm_start;
		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))

	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr += PAGE_SIZE) {
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
					flush_cache_page(vma, addr);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
						page_cache_release(page);
					page_cache_release(page);

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;

	if ((off_t) file->f_pos != offset) {
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		kfree(list_entry(tmp, struct elf_thread_status, list));
#ifdef ELF_CORE_COPY_XFPREGS
#endif /* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
	return register_binfmt(&elf_format);

static void __exit exit_elf_binfmt(void)
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");