2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
41 #include <asm/uaccess.h>
42 #include <asm/param.h>
44 #include <linux/elf.h>
46 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
47 static int load_elf_library(struct file*);
48 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
49 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
52 #define elf_addr_t unsigned long
56 * If we don't support core dumping, then supply a NULL so we don't even try.
59 #ifdef USE_ELF_CORE_DUMP
60 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
62 #define elf_core_dump NULL
65 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
68 # define ELF_MIN_ALIGN PAGE_SIZE
71 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
72 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
73 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
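/*
 * Quick reference for the macros above (a reader's note, assuming
 * ELF_MIN_ALIGN == 4096): ELF_PAGESTART(0x12345) == 0x12000 (round down
 * to the alignment boundary), ELF_PAGEOFFSET(0x12345) == 0x345 (offset
 * within the aligned unit), ELF_PAGEALIGN(0x12345) == 0x13000 (round up).
 */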
75 static struct linux_binfmt elf_format = {
76 .module = THIS_MODULE,
77 .load_binary = load_elf_binary,
78 .load_shlib = load_elf_library,
79 .core_dump = elf_core_dump,
80 .min_coredump = ELF_EXEC_PAGESIZE
83 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
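/*
 * Note: BAD_ADDR() catches two cases at once - addresses that fall beyond
 * the user address space limit, and the negative errno values returned by
 * do_mmap()/do_brk(), which appear as very large unsigned longs.
 */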
85 static int set_brk(unsigned long start, unsigned long end)
87 start = ELF_PAGEALIGN(start);
88 end = ELF_PAGEALIGN(end);
90 unsigned long addr = do_brk(start, end - start);
94 current->mm->start_brk = current->mm->brk = end;
99 /* We need to explicitly zero any fractional pages
100 after the data section (i.e. bss). This would
101 contain the junk from the file that should not be in memory. */
105 static void padzero(unsigned long elf_bss)
109 nbyte = ELF_PAGEOFFSET(elf_bss);
111 nbyte = ELF_MIN_ALIGN - nbyte;
112 clear_user((void __user *) elf_bss, nbyte);
116 /* Let's use some macros to make this stack manipulation a little clearer */
117 #ifdef CONFIG_STACK_GROWSUP
118 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
119 #define STACK_ROUND(sp, items) \
120 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
121 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
123 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
124 #define STACK_ROUND(sp, items) \
125 (((unsigned long) (sp - items)) &~ 15UL)
126 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
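/*
 * Reader's note on the two variants above: on stack-grows-up architectures
 * (e.g. PA-RISC) STACK_ALLOC() returns the old stack pointer and advances it
 * by 'len' bytes, while in the usual grows-down case it simply moves the
 * pointer down and returns the new value. STACK_ADD() likewise moves by
 * 'items' elf_addr_t slots in the appropriate direction, and STACK_ROUND()
 * keeps the resulting stack pointer 16-byte aligned.
 */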
130 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
131 int interp_aout, unsigned long load_addr,
132 unsigned long interp_load_addr)
134 unsigned long p = bprm->p;
135 int argc = bprm->argc;
136 int envc = bprm->envc;
137 elf_addr_t __user *argv;
138 elf_addr_t __user *envp;
139 elf_addr_t __user *sp;
140 elf_addr_t __user *u_platform;
141 const char *k_platform = ELF_PLATFORM;
143 elf_addr_t *elf_info;
145 struct task_struct *tsk = current;
148 * If this architecture has a platform capability string, copy it
149 * to userspace. In some cases (Sparc), this info is impossible
150 * for userspace to get any other way, in others (i386) it is merely difficult.
156 size_t len = strlen(k_platform) + 1;
158 #ifdef __HAVE_ARCH_ALIGN_STACK
159 p = (unsigned long)arch_align_stack((unsigned long)p);
161 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
162 __copy_to_user(u_platform, k_platform, len);
165 /* Create the ELF interpreter info */
166 elf_info = (elf_addr_t *) current->mm->saved_auxv;
167 #define NEW_AUX_ENT(id, val) \
168 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
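/*
 * Each NEW_AUX_ENT() below appends one (a_type, a_val) pair to the auxiliary
 * vector being built in current->mm->saved_auxv; the whole table is copied
 * onto the user stack further down and is terminated by an AT_NULL entry
 * (the memset below clears the unused tail).
 */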
172 * ARCH_DLINFO must come first so PPC can do its special alignment of auxv.
177 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
178 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
179 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
180 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
181 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
182 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
183 NEW_AUX_ENT(AT_BASE, interp_load_addr);
184 NEW_AUX_ENT(AT_FLAGS, 0);
185 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
186 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
187 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
188 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
189 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
190 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
192 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
194 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
195 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
198 /* AT_NULL is zero; clear the rest too */
199 memset(&elf_info[ei_index], 0,
200 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
202 /* And advance past the AT_NULL entry. */
205 sp = STACK_ADD(p, ei_index);
207 items = (argc + 1) + (envc + 1);
209 items += 3; /* a.out interpreters require argv & envp too */
211 items += 1; /* ELF interpreters only put argc on the stack */
213 bprm->p = STACK_ROUND(sp, items);
215 /* Point sp at the lowest address on the stack */
216 #ifdef CONFIG_STACK_GROWSUP
217 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
218 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
220 sp = (elf_addr_t __user *)bprm->p;
223 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
224 __put_user(argc, sp++);
227 envp = argv + argc + 1;
228 __put_user((elf_addr_t)(unsigned long)argv, sp++);
229 __put_user((elf_addr_t)(unsigned long)envp, sp++);
232 envp = argv + argc + 1;
235 /* Populate argv and envp */
236 p = current->mm->arg_start;
239 __put_user((elf_addr_t)p, argv++);
240 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
241 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
246 current->mm->arg_end = current->mm->env_start = p;
249 __put_user((elf_addr_t)p, envp++);
250 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
251 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
256 current->mm->env_end = p;
258 /* Put the elf_info on the stack in the right place. */
259 sp = (elf_addr_t __user *)envp + 1;
260 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
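/*
 * Summary of what has been built on the new stack (grows-down case, lowest
 * address first): argc, the argv[] pointer array, a NULL, the envp[] pointer
 * array, a NULL, and finally the auxiliary vector copied just above; the
 * argument and environment strings themselves live higher up, where
 * copy_strings() placed them.
 */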
265 static unsigned long elf_map(struct file *filep, unsigned long addr,
266 struct elf_phdr *eppnt, int prot, int type,
267 unsigned long total_size)
269 unsigned long map_addr;
270 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
271 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
273 addr = ELF_PAGESTART(addr);
274 size = ELF_PAGEALIGN(size);
276 down_write(&current->mm->mmap_sem);
279 * total_size is the size of the ELF (interpreter) image.
280 * The _first_ mmap needs to know the full size, otherwise
281 * randomization might put this image into an overlapping
282 * position with the ELF binary image. (since size < total_size)
283 * So we first map the 'big' image and then unmap the remainder at
284 * the end (this unmapping is needed for ELF images with holes).
287 total_size = ELF_PAGEALIGN(total_size);
288 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
289 if (!BAD_ADDR(map_addr))
290 do_munmap(current->mm, map_addr+size, total_size-size);
292 map_addr = do_mmap(filep, addr, size, prot, type, off);
294 up_write(&current->mm->mmap_sem);
299 #endif /* !elf_map */
301 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
303 int i, first_idx = -1, last_idx = -1;
305 for (i = 0; i < nr; i++)
306 if (cmds[i].p_type == PT_LOAD) {
315 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
316 ELF_PAGESTART(cmds[first_idx].p_vaddr);
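/*
 * I.e. total_mapping_size() returns the distance from the page-aligned start
 * of the first PT_LOAD segment to the end of the last one's memory image -
 * the amount of address space the whole ELF image will occupy.
 */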
319 /* This is much more generalized than the library routine read function,
320 so we keep this separate. Technically the library read function
321 is only provided so that we can read a.out libraries that have an ELF header. */
324 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
325 struct file * interpreter,
326 unsigned long *interp_load_addr,
327 unsigned long no_base)
329 struct elf_phdr *elf_phdata;
330 struct elf_phdr *eppnt;
331 unsigned long load_addr = 0;
332 int load_addr_set = 0;
333 unsigned long last_bss = 0, elf_bss = 0;
334 unsigned long error = ~0UL;
335 unsigned long total_size;
338 /* First of all, some simple consistency checks */
339 if (interp_elf_ex->e_type != ET_EXEC &&
340 interp_elf_ex->e_type != ET_DYN)
342 if (!elf_check_arch(interp_elf_ex))
344 if (!interpreter->f_op || !interpreter->f_op->mmap)
348 * If the size of this structure has changed, then punt, since
349 * we will be doing the wrong thing.
351 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
353 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
356 /* Now read in all of the header information */
358 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
359 if (size > ELF_MIN_ALIGN)
361 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
365 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
370 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
375 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
376 if (eppnt->p_type == PT_LOAD) {
377 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
379 unsigned long vaddr = 0;
380 unsigned long k, map_addr;
382 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
383 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
384 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
385 vaddr = eppnt->p_vaddr;
386 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
387 elf_type |= MAP_FIXED;
388 else if (no_base && interp_elf_ex->e_type == ET_DYN)
391 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
394 if (BAD_ADDR(map_addr))
397 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
398 load_addr = map_addr - ELF_PAGESTART(vaddr);
403 * Check to see if the section's size will overflow the
404 * allowed task size. Note that p_filesz must always be
405 * <= p_memsz so it is only necessary to check p_memsz.
407 k = load_addr + eppnt->p_vaddr;
408 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
409 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
415 * Find the end of the file mapping for this phdr, and keep
416 * track of the largest address we see for this.
418 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
423 * Do the same thing for the memory mapping - between
424 * elf_bss and last_bss is the bss section.
426 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
433 * Now fill out the bss section. First pad the last page up
434 * to the page boundary, and then perform a mmap to make sure
435 * that there are zero-mapped pages up to and including the
439 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
441 /* Map the last of the bss segment */
442 if (last_bss > elf_bss) {
443 error = do_brk(elf_bss, last_bss - elf_bss);
448 *interp_load_addr = load_addr;
449 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
457 static unsigned long load_aout_interp(struct exec * interp_ex,
458 struct file * interpreter)
460 unsigned long text_data, elf_entry = ~0UL;
464 current->mm->end_code = interp_ex->a_text;
465 text_data = interp_ex->a_text + interp_ex->a_data;
466 current->mm->end_data = text_data;
467 current->mm->brk = interp_ex->a_bss + text_data;
469 switch (N_MAGIC(*interp_ex)) {
472 addr = (char __user *)0;
476 offset = N_TXTOFF(*interp_ex);
477 addr = (char __user *) N_TXTADDR(*interp_ex);
483 do_brk(0, text_data);
484 if (!interpreter->f_op || !interpreter->f_op->read)
486 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
488 flush_icache_range((unsigned long)addr,
489 (unsigned long)addr + text_data);
491 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
493 elf_entry = interp_ex->a_entry;
500 * These are the functions used to load ELF style executables and shared
501 * libraries. There is no binary dependent code anywhere else.
504 #define INTERPRETER_NONE 0
505 #define INTERPRETER_AOUT 1
506 #define INTERPRETER_ELF 2
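/*
 * The INTERPRETER_* values above are used as a bitmask: when a PT_INTERP
 * header is present, load_elf_binary() starts with ELF | AOUT and clears
 * whichever bit the interpreter's exec/ELF headers rule out; if neither
 * format remains, the exec fails.
 */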
509 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
511 struct file *interpreter = NULL; /* to shut gcc up */
512 unsigned long load_addr = 0, load_bias = 0;
513 int load_addr_set = 0;
514 char * elf_interpreter = NULL;
515 unsigned int interpreter_type = INTERPRETER_NONE;
516 unsigned char ibcs2_interpreter = 0;
518 struct elf_phdr * elf_ppnt, *elf_phdata;
519 unsigned long elf_bss, elf_brk;
523 unsigned long elf_entry, interp_load_addr = 0;
524 unsigned long start_code, end_code, start_data, end_data;
525 unsigned long reloc_func_desc = 0;
526 struct elfhdr elf_ex;
527 struct elfhdr interp_elf_ex;
528 struct exec interp_ex;
529 char passed_fileno[6];
530 struct files_struct *files;
531 int have_pt_gnu_stack, executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
532 unsigned long def_flags = 0;
534 /* Get the exec-header */
535 elf_ex = *((struct elfhdr *) bprm->buf);
538 /* First of all, some simple consistency checks */
539 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
542 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
544 if (!elf_check_arch(&elf_ex))
546 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
549 /* Now read in all of the header information */
552 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
554 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
556 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
557 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
561 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
565 files = current->files; /* Refcounted so ok */
566 retval = unshare_files();
569 if (files == current->files) {
570 put_files_struct(files);
574 /* exec will make our files private anyway, but for the a.out
575 loader stuff we need to do it earlier */
577 retval = get_unused_fd();
580 get_file(bprm->file);
581 fd_install(elf_exec_fileno = retval, bprm->file);
583 elf_ppnt = elf_phdata;
592 for (i = 0; i < elf_ex.e_phnum; i++) {
593 if (elf_ppnt->p_type == PT_INTERP) {
594 /* This is the program interpreter used for
595 * shared libraries - for now assume that this
596 * is an a.out format binary
600 if (elf_ppnt->p_filesz > PATH_MAX)
602 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
604 if (!elf_interpreter)
607 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
611 goto out_free_interp;
612 /* If the program interpreter is one of these two,
613 * then assume an iBCS2 image. Otherwise assume
614 * a native linux image.
616 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
617 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
618 ibcs2_interpreter = 1;
621 * The early SET_PERSONALITY here is so that the lookup
622 * for the interpreter happens in the namespace of the
623 * to-be-execed image. SET_PERSONALITY can select an
626 * However, SET_PERSONALITY is NOT allowed to switch
627 * this task into the new image's memory mapping
628 * policy - that is, TASK_SIZE must still evaluate to
629 * that which is appropriate to the execing application.
630 * This is because exit_mmap() needs to have TASK_SIZE
631 * evaluate to the size of the old image.
633 * So if (say) a 64-bit application is execing a 32-bit
634 * application it is the architecture's responsibility
635 * to defer changing the value of TASK_SIZE until the
636 * switch really is going to happen - do this in
637 * flush_thread(). - akpm
639 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
641 interpreter = open_exec(elf_interpreter);
642 retval = PTR_ERR(interpreter);
643 if (IS_ERR(interpreter))
644 goto out_free_interp;
645 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
647 goto out_free_dentry;
649 /* Get the exec headers */
650 interp_ex = *((struct exec *) bprm->buf);
651 interp_elf_ex = *((struct elfhdr *) bprm->buf);
657 elf_ppnt = elf_phdata;
658 executable_stack = EXSTACK_DEFAULT;
660 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
661 if (elf_ppnt->p_type == PT_GNU_STACK) {
662 if (elf_ppnt->p_flags & PF_X)
663 executable_stack = EXSTACK_ENABLE_X;
665 executable_stack = EXSTACK_DISABLE_X;
668 have_pt_gnu_stack = (i < elf_ex.e_phnum);
672 if (current->personality == PER_LINUX)
673 switch (exec_shield) {
675 if (executable_stack != EXSTACK_DEFAULT) {
676 current->flags |= PF_RELOCEXEC;
677 relocexec = PF_RELOCEXEC;
682 executable_stack = EXSTACK_DISABLE_X;
683 current->flags |= PF_RELOCEXEC;
684 relocexec = PF_RELOCEXEC;
688 /* Some simple consistency checks for the interpreter */
689 if (elf_interpreter) {
690 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
692 /* Now figure out which format our binary is */
693 if ((N_MAGIC(interp_ex) != OMAGIC) &&
694 (N_MAGIC(interp_ex) != ZMAGIC) &&
695 (N_MAGIC(interp_ex) != QMAGIC))
696 interpreter_type = INTERPRETER_ELF;
698 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
699 interpreter_type &= ~INTERPRETER_ELF;
702 if (!interpreter_type)
703 goto out_free_dentry;
705 /* Make sure only one type was selected */
706 if ((interpreter_type & INTERPRETER_ELF) &&
707 interpreter_type != INTERPRETER_ELF) {
708 // FIXME - ratelimit this before re-enabling
709 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
710 interpreter_type = INTERPRETER_ELF;
712 /* Verify the interpreter has a valid arch */
713 if ((interpreter_type == INTERPRETER_ELF) &&
714 !elf_check_arch(&interp_elf_ex))
715 goto out_free_dentry;
717 /* Executables without an interpreter also need a personality */
718 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
721 /* OK, we are done with that, now set up the arg stuff,
722 and then start this sucker up */
724 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
725 char *passed_p = passed_fileno;
726 sprintf(passed_fileno, "%d", elf_exec_fileno);
728 if (elf_interpreter) {
729 retval = copy_strings_kernel(1, &passed_p, bprm);
731 goto out_free_dentry;
736 /* Flush all traces of the currently running executable */
737 retval = flush_old_exec(bprm);
739 goto out_free_dentry;
740 current->flags |= relocexec;
744 * Turn off the CS limit completely if exec-shield disabled or
748 arch_add_exec_range(current->mm, -1);
751 /* Discard our unneeded old files struct */
754 put_files_struct(files);
758 /* OK, This is the point of no return */
759 current->mm->start_data = 0;
760 current->mm->end_data = 0;
761 current->mm->end_code = 0;
762 current->mm->mmap = NULL;
763 current->flags &= ~PF_FORKNOEXEC;
764 current->mm->def_flags = def_flags;
766 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
767 may depend on the personality. */
768 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
769 if (elf_read_implies_exec(elf_ex, have_pt_gnu_stack))
770 current->personality |= READ_IMPLIES_EXEC;
772 /* Do this so that we can load the interpreter, if need be. We will
773 change some of these later */
774 // current->mm->rss = 0;
775 vx_rsspages_sub(current->mm, current->mm->rss);
776 current->mm->free_area_cache = current->mm->mmap_base;
777 retval = setup_arg_pages(bprm, executable_stack);
779 send_sig(SIGKILL, current, 0);
780 goto out_free_dentry;
783 current->mm->start_stack = bprm->p;
786 /* Now we do a little grungy work by mmapping the ELF image into
787 the correct location in memory.
790 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
791 int elf_prot = 0, elf_flags;
792 unsigned long k, vaddr;
794 if (elf_ppnt->p_type != PT_LOAD)
797 if (unlikely (elf_brk > elf_bss)) {
800 /* There was a PT_LOAD segment with p_memsz > p_filesz
801 before this one. Map anonymous pages, if needed,
802 and clear the area. */
803 retval = set_brk (elf_bss + load_bias,
804 elf_brk + load_bias);
806 send_sig(SIGKILL, current, 0);
807 goto out_free_dentry;
809 nbyte = ELF_PAGEOFFSET(elf_bss);
811 nbyte = ELF_MIN_ALIGN - nbyte;
812 if (nbyte > elf_brk - elf_bss)
813 nbyte = elf_brk - elf_bss;
814 clear_user((void __user *) elf_bss + load_bias, nbyte);
818 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
819 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
820 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
822 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
824 vaddr = elf_ppnt->p_vaddr;
825 if (elf_ex.e_type == ET_EXEC || load_addr_set)
826 elf_flags |= MAP_FIXED;
827 else if (elf_ex.e_type == ET_DYN)
831 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
834 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
838 if (!load_addr_set) {
840 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
841 if (elf_ex.e_type == ET_DYN) {
843 ELF_PAGESTART(load_bias + vaddr);
844 load_addr += load_bias;
845 reloc_func_desc = load_bias;
848 k = elf_ppnt->p_vaddr;
849 if (k < start_code) start_code = k;
850 if (start_data < k) start_data = k;
853 * Check to see if the section's size will overflow the
854 * allowed task size. Note that p_filesz must always be
855 * <= p_memsz so it is only necessary to check p_memsz.
857 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
858 elf_ppnt->p_memsz > TASK_SIZE ||
859 TASK_SIZE - elf_ppnt->p_memsz < k) {
860 /* set_brk can never work. Avoid overflows. */
861 send_sig(SIGKILL, current, 0);
862 goto out_free_dentry;
865 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
869 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
873 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
878 elf_ex.e_entry += load_bias;
879 elf_bss += load_bias;
880 elf_brk += load_bias;
881 start_code += load_bias;
882 end_code += load_bias;
883 start_data += load_bias;
884 end_data += load_bias;
886 /* Calling set_brk effectively mmaps the pages that we need
887 * for the bss and break sections. We must do this before
888 * mapping in the interpreter, to make sure it doesn't wind
889 * up getting placed where the bss needs to go.
891 retval = set_brk(elf_bss, elf_brk);
893 send_sig(SIGKILL, current, 0);
894 goto out_free_dentry;
898 if (elf_interpreter) {
899 if (interpreter_type == INTERPRETER_AOUT)
900 elf_entry = load_aout_interp(&interp_ex,
903 elf_entry = load_elf_interp(&interp_elf_ex,
907 if (BAD_ADDR(elf_entry)) {
908 printk(KERN_ERR "Unable to load interpreter\n");
909 send_sig(SIGSEGV, current, 0);
910 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
911 goto out_free_dentry;
913 reloc_func_desc = interp_load_addr;
915 allow_write_access(interpreter);
917 kfree(elf_interpreter);
919 elf_entry = elf_ex.e_entry;
924 if (interpreter_type != INTERPRETER_AOUT)
925 sys_close(elf_exec_fileno);
927 set_binfmt(&elf_format);
930 * Map the vsyscall trampoline. This address is then passed via
933 #ifdef __HAVE_ARCH_VSYSCALL
938 current->flags &= ~PF_FORKNOEXEC;
939 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
940 load_addr, interp_load_addr);
941 /* N.B. passed_fileno might not be initialized? */
942 if (interpreter_type == INTERPRETER_AOUT)
943 current->mm->arg_start += strlen(passed_fileno) + 1;
944 current->mm->end_code = end_code;
945 current->mm->start_code = start_code;
946 current->mm->start_data = start_data;
947 current->mm->end_data = end_data;
948 current->mm->start_stack = bprm->p;
950 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
951 if (current->flags & PF_RELOCEXEC)
952 randomize_brk(elf_brk);
954 if (current->personality & MMAP_PAGE_ZERO) {
955 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
956 and some applications "depend" upon this behavior.
957 Since we do not have the power to recompile these, we
958 emulate the SVr4 behavior. Sigh. */
959 down_write(&current->mm->mmap_sem);
960 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
961 MAP_FIXED | MAP_PRIVATE, 0);
962 up_write(&current->mm->mmap_sem);
967 * The ABI may specify that certain registers be set up in special
968 * ways (on i386 %edx is the address of a DT_FINI function, for
969 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
970 * that the e_entry field is the address of the function descriptor
971 * for the startup routine, rather than the address of the startup
972 * routine itself. This macro performs whatever initialization to
973 * the regs structure is required as well as any relocations to the
974 * function descriptor entries when executing dynamically linked apps.
976 ELF_PLAT_INIT(regs, reloc_func_desc);
979 start_thread(regs, elf_entry, bprm->p);
980 if (unlikely(current->ptrace & PT_PTRACED)) {
981 if (current->ptrace & PT_TRACE_EXEC)
982 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
984 send_sig(SIGTRAP, current, 0);
992 allow_write_access(interpreter);
997 kfree(elf_interpreter);
999 sys_close(elf_exec_fileno);
1002 put_files_struct(current->files);
1003 current->files = files;
1007 current->flags &= ~PF_RELOCEXEC;
1008 current->flags |= old_relocexec;
1012 /* This is really simpleminded and specialized - we are loading an
1013 a.out library that is given an ELF header. */
1015 static int load_elf_library(struct file *file)
1017 struct elf_phdr *elf_phdata;
1018 unsigned long elf_bss, bss, len;
1019 int retval, error, i, j;
1020 struct elfhdr elf_ex;
1023 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1024 if (retval != sizeof(elf_ex))
1027 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1030 /* First of all, some simple consistency checks */
1031 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1032 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1035 /* Now read in all of the header information */
1037 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1038 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1041 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1046 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1050 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1051 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1055 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1057 /* Now use mmap to map the library into memory. */
1058 down_write(&current->mm->mmap_sem);
1059 error = do_mmap(file,
1060 ELF_PAGESTART(elf_phdata->p_vaddr),
1061 (elf_phdata->p_filesz +
1062 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1063 PROT_READ | PROT_WRITE | PROT_EXEC,
1064 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1065 (elf_phdata->p_offset -
1066 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1067 up_write(&current->mm->mmap_sem);
1068 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1071 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1074 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1075 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1077 do_brk(len, bss - len);
1087 * Note that some platforms still use traditional core dumps and not
1088 * the ELF core dump. Each platform can select it as appropriate.
1090 #ifdef USE_ELF_CORE_DUMP
1095 * Modelled on fs/exec.c:aout_core_dump()
1096 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1099 * These are the only things you should do on a core-file: use only these
1100 * functions to write out all the necessary info.
1102 static int dump_write(struct file *file, const void *addr, int nr)
1104 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1107 static int dump_seek(struct file *file, off_t off)
1109 if (file->f_op->llseek) {
1110 if (file->f_op->llseek(file, off, 0) != off)
1118 * Decide whether a segment is worth dumping; default is yes to be
1119 * sure (missing info is worse than too much; etc).
1120 * Personally I'd include everything, and use the coredump limit...
1122 * I think we should skip something. But I am not sure how. H.J.
1124 static int maydump(struct vm_area_struct *vma)
1127 * If we may not read the contents, don't allow us to dump
1128 * them either. "dump_write()" can't handle it anyway.
1130 if (!(vma->vm_flags & VM_READ))
1133 /* Do not dump I/O mapped devices! -DaveM */
1134 if (vma->vm_flags & VM_IO)
1137 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1139 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1145 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1147 /* An ELF note in memory */
1152 unsigned int datasz;
1156 static int notesize(struct memelfnote *en)
1160 sz = sizeof(struct elf_note);
1161 sz += roundup(strlen(en->name) + 1, 4);
1162 sz += roundup(en->datasz, 4);
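/*
 * On-disk layout of one core-file note, which notesize() accounts for:
 * a struct elf_note header (n_namesz, n_descsz, n_type) followed by the
 * name string and then the descriptor data, each padded out to a 4-byte
 * boundary via roundup().
 */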
1167 #define DUMP_WRITE(addr, nr) \
1168 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1169 #define DUMP_SEEK(off) \
1170 do { if (!dump_seek(file, (off))) return 0; } while(0)
1172 static int writenote(struct memelfnote *men, struct file *file)
1176 en.n_namesz = strlen(men->name) + 1;
1177 en.n_descsz = men->datasz;
1178 en.n_type = men->type;
1180 DUMP_WRITE(&en, sizeof(en));
1181 DUMP_WRITE(men->name, en.n_namesz);
1182 /* XXX - cast from long long to long to avoid need for libgcc.a */
1183 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1184 DUMP_WRITE(men->data, men->datasz);
1185 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1192 #define DUMP_WRITE(addr, nr) \
1193 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1195 #define DUMP_SEEK(off) \
1196 if (!dump_seek(file, (off))) \
1199 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1201 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1202 elf->e_ident[EI_CLASS] = ELF_CLASS;
1203 elf->e_ident[EI_DATA] = ELF_DATA;
1204 elf->e_ident[EI_VERSION] = EV_CURRENT;
1205 elf->e_ident[EI_OSABI] = ELF_OSABI;
1206 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1208 elf->e_type = ET_CORE;
1209 elf->e_machine = ELF_ARCH;
1210 elf->e_version = EV_CURRENT;
1212 elf->e_phoff = sizeof(struct elfhdr);
1215 elf->e_ehsize = sizeof(struct elfhdr);
1216 elf->e_phentsize = sizeof(struct elf_phdr);
1217 elf->e_phnum = segs;
1218 elf->e_shentsize = 0;
1220 elf->e_shstrndx = 0;
1224 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1226 phdr->p_type = PT_NOTE;
1227 phdr->p_offset = offset;
1230 phdr->p_filesz = sz;
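/*
 * The PT_NOTE program header filled in above only needs a file offset and a
 * file size - the notes are parsed straight out of the core file and are
 * never mapped into memory, so no meaningful virtual address is required.
 */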
1237 static void fill_note(struct memelfnote *note, const char *name, int type,
1238 unsigned int sz, void *data)
1248 * fill up all the fields in prstatus from the given task struct, except registers
1249 * which need to be filled up separately.
1251 static void fill_prstatus(struct elf_prstatus *prstatus,
1252 struct task_struct *p, long signr)
1254 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1255 prstatus->pr_sigpend = p->pending.signal.sig[0];
1256 prstatus->pr_sighold = p->blocked.sig[0];
1257 prstatus->pr_pid = p->pid;
1258 prstatus->pr_ppid = p->parent->pid;
1259 prstatus->pr_pgrp = process_group(p);
1260 prstatus->pr_sid = p->signal->session;
1261 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1262 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1263 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1264 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
1267 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1268 struct mm_struct *mm)
1272 /* first copy the parameters from user space */
1273 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1275 len = mm->arg_end - mm->arg_start;
1276 if (len >= ELF_PRARGSZ)
1277 len = ELF_PRARGSZ-1;
1278 copy_from_user(&psinfo->pr_psargs,
1279 (const char __user *)mm->arg_start, len);
1280 for(i = 0; i < len; i++)
1281 if (psinfo->pr_psargs[i] == 0)
1282 psinfo->pr_psargs[i] = ' ';
1283 psinfo->pr_psargs[len] = 0;
1285 psinfo->pr_pid = p->pid;
1286 psinfo->pr_ppid = p->parent->pid;
1287 psinfo->pr_pgrp = process_group(p);
1288 psinfo->pr_sid = p->signal->session;
1290 i = p->state ? ffz(~p->state) + 1 : 0;
1291 psinfo->pr_state = i;
1292 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1293 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1294 psinfo->pr_nice = task_nice(p);
1295 psinfo->pr_flag = p->flags;
1296 SET_UID(psinfo->pr_uid, p->uid);
1297 SET_GID(psinfo->pr_gid, p->gid);
1298 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1303 /* Here is the structure in which status of each thread is captured. */
1304 struct elf_thread_status
1306 struct list_head list;
1307 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1308 elf_fpregset_t fpu; /* NT_PRFPREG */
1309 #ifdef ELF_CORE_COPY_XFPREGS
1310 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1312 struct memelfnote notes[3];
1317 * In order to add the specific thread information for the elf file format,
1318 * we need to keep a linked list of every thread's pr_status and then
1319 * create a single section for them in the final core file.
1321 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1324 struct elf_thread_status *t;
1327 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1330 memset(t, 0, sizeof(*t));
1332 INIT_LIST_HEAD(&t->list);
1335 fill_prstatus(&t->prstatus, p, signr);
1336 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1338 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1340 sz += notesize(&t->notes[0]);
1342 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1343 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1345 sz += notesize(&t->notes[1]);
1348 #ifdef ELF_CORE_COPY_XFPREGS
1349 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1350 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1352 sz += notesize(&t->notes[2]);
1355 list_add(&t->list, thread_list);
1362 * This is a two-pass process; first we find the offsets of the bits,
1363 * and then they are actually written out. If we run out of core limit we just truncate.
1366 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1374 struct vm_area_struct *vma;
1375 struct elfhdr *elf = NULL;
1376 off_t offset = 0, dataoff;
1377 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1379 struct memelfnote *notes = NULL;
1380 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1381 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1382 struct task_struct *g, *p;
1383 LIST_HEAD(thread_list);
1384 struct list_head *t;
1385 elf_fpregset_t *fpu = NULL;
1386 #ifdef ELF_CORE_COPY_XFPREGS
1387 elf_fpxregset_t *xfpu = NULL;
1389 int thread_status_size = 0;
1393 * We no longer stop all VM operations.
1395 * This is because those processes that could possibly change map_count or
1396 * the mmap / vma pages are now blocked in do_exit on current finishing this core dump.
1399 * Only ptrace can touch these memory addresses, but it doesn't change
1400 * the map_count or the pages allocated. So no possibility of crashing
1401 * exists while dumping the mm->vm_next areas to the core file.
1404 /* alloc memory for large data structures: too large to be on stack */
1405 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1408 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1411 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1414 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1417 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1420 #ifdef ELF_CORE_COPY_XFPREGS
1421 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1426 /* capture the status of all other threads */
1428 read_lock(&tasklist_lock);
1430 if (current->mm == p->mm && current != p) {
1431 int sz = elf_dump_thread_status(signr, p, &thread_list);
1433 read_unlock(&tasklist_lock);
1436 thread_status_size += sz;
1438 while_each_thread(g,p);
1439 read_unlock(&tasklist_lock);
1442 /* now collect the dump for the current */
1443 memset(prstatus, 0, sizeof(*prstatus));
1444 fill_prstatus(prstatus, current, signr);
1445 elf_core_copy_regs(&prstatus->pr_reg, regs);
1447 segs = current->mm->map_count;
1448 #ifdef ELF_CORE_EXTRA_PHDRS
1449 segs += ELF_CORE_EXTRA_PHDRS;
1453 fill_elf_header(elf, segs+1); /* including notes section */
1456 current->flags |= PF_DUMPCORE;
1459 * Set up the notes in similar form to SVR4 core dumps made
1460 * with info from their /proc.
1463 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1465 fill_psinfo(psinfo, current->group_leader, current->mm);
1466 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1468 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1472 auxv = (elf_addr_t *) current->mm->saved_auxv;
1477 while (auxv[i - 2] != AT_NULL);
1478 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1479 i * sizeof (elf_addr_t), auxv);
1481 /* Try to dump the FPU. */
1482 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1483 fill_note(notes + numnote++,
1484 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1485 #ifdef ELF_CORE_COPY_XFPREGS
1486 if (elf_core_copy_task_xfpregs(current, xfpu))
1487 fill_note(notes + numnote++,
1488 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1494 DUMP_WRITE(elf, sizeof(*elf));
1495 offset += sizeof(*elf); /* Elf header */
1496 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1498 /* Write notes phdr entry */
1500 struct elf_phdr phdr;
1503 for (i = 0; i < numnote; i++)
1504 sz += notesize(notes + i);
1506 sz += thread_status_size;
1508 fill_elf_note_phdr(&phdr, sz, offset);
1510 DUMP_WRITE(&phdr, sizeof(phdr));
1513 /* Page-align dumped data */
1514 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
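/*
 * Core file layout so far: the ELF header, then (segs + 1) program headers
 * (the process's VMAs, any arch-specific extra phdrs, plus the PT_NOTE
 * header), then the notes themselves; the memory segment contents begin at
 * dataoff, the next ELF_EXEC_PAGESIZE boundary after all of that.
 */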
1516 /* Write program headers for segments dump */
1517 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1518 struct elf_phdr phdr;
1521 sz = vma->vm_end - vma->vm_start;
1523 phdr.p_type = PT_LOAD;
1524 phdr.p_offset = offset;
1525 phdr.p_vaddr = vma->vm_start;
1527 phdr.p_filesz = maydump(vma) ? sz : 0;
1529 offset += phdr.p_filesz;
1530 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1531 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1532 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1533 phdr.p_align = ELF_EXEC_PAGESIZE;
1535 DUMP_WRITE(&phdr, sizeof(phdr));
1538 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1539 ELF_CORE_WRITE_EXTRA_PHDRS;
1542 /* write out the notes section */
1543 for (i = 0; i < numnote; i++)
1544 if (!writenote(notes + i, file))
1547 /* write out the thread status notes section */
1548 list_for_each(t, &thread_list) {
1549 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1550 for (i = 0; i < tmp->num_notes; i++)
1551 if (!writenote(&tmp->notes[i], file))
1557 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1563 for (addr = vma->vm_start;
1565 addr += PAGE_SIZE) {
1567 struct vm_area_struct *vma;
1569 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1570 &page, &vma) <= 0) {
1571 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1573 if (page == ZERO_PAGE(addr)) {
1574 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1577 flush_cache_page(vma, addr);
1579 if ((size += PAGE_SIZE) > limit ||
1580 !dump_write(file, kaddr,
1583 page_cache_release(page);
1588 page_cache_release(page);
1593 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1594 ELF_CORE_WRITE_EXTRA_DATA;
1597 if ((off_t) file->f_pos != offset) {
1599 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1600 (off_t) file->f_pos, offset);
1607 while(!list_empty(&thread_list)) {
1608 struct list_head *tmp = thread_list.next;
1610 kfree(list_entry(tmp, struct elf_thread_status, list));
1618 #ifdef ELF_CORE_COPY_XFPREGS
1625 #endif /* USE_ELF_CORE_DUMP */
1627 static int __init init_elf_binfmt(void)
1629 return register_binfmt(&elf_format);
1632 static void __exit exit_elf_binfmt(void)
1634 /* Remove the COFF and ELF loaders. */
1635 unregister_binfmt(&elf_format);
1638 core_initcall(init_elf_binfmt);
1639 module_exit(exit_elf_binfmt);
1640 MODULE_LICENSE("GPL");