2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support Tools".
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
41 #include <asm/uaccess.h>
42 #include <asm/param.h>
44 #include <linux/elf.h>
46 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
47 static int load_elf_library(struct file*);
48 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
49 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
52 #define elf_addr_t unsigned long
56 * If we don't support core dumping, then supply a NULL so we don't even try.
59 #ifdef USE_ELF_CORE_DUMP
60 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
62 #define elf_core_dump NULL
65 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
68 # define ELF_MIN_ALIGN PAGE_SIZE
71 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
72 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
73 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
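/*
 * Worked example (illustrative address, assuming ELF_MIN_ALIGN == 4096):
 *   ELF_PAGESTART(0x08049234)  == 0x08049000   round down to a page boundary
 *   ELF_PAGEOFFSET(0x08049234) == 0x00000234   offset within that page
 *   ELF_PAGEALIGN(0x08049234)  == 0x0804a000   round up to the next boundary
 */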
75 static struct linux_binfmt elf_format = {
76 .module = THIS_MODULE,
77 .load_binary = load_elf_binary,
78 .load_shlib = load_elf_library,
79 .core_dump = elf_core_dump,
80 .min_coredump = ELF_EXEC_PAGESIZE
83 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
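/*
 * Note: besides addresses that would land beyond the task's address space,
 * BAD_ADDR() also catches the negative errno values that do_mmap()/do_brk()
 * return on failure, since as unsigned longs those sit far above TASK_SIZE.
 */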
85 static int set_brk(unsigned long start, unsigned long end)
87 start = ELF_PAGEALIGN(start);
88 end = ELF_PAGEALIGN(end);
90 unsigned long addr = do_brk(start, end - start);
94 current->mm->start_brk = current->mm->brk = end;
99 /* We need to explicitly zero any fractional pages
100 after the data section (i.e. bss). This would
101 contain the junk from the file that should not be in memory. */
105 static void padzero(unsigned long elf_bss)
109 nbyte = ELF_PAGEOFFSET(elf_bss);
111 nbyte = ELF_MIN_ALIGN - nbyte;
112 clear_user((void __user *) elf_bss, nbyte);
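/*
 * Example (assuming ELF_MIN_ALIGN == 4096): for elf_bss == 0x0804a123 the
 * page offset is 0x123, so the remaining 4096 - 0x123 == 0xedd bytes of
 * that final page are cleared to zero here.
 */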
116 /* Let's use some macros to make this stack manipulation a little clearer */
117 #ifdef CONFIG_STACK_GROWSUP
118 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
119 #define STACK_ROUND(sp, items) \
120 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
121 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
123 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
124 #define STACK_ROUND(sp, items) \
125 (((unsigned long) (sp - items)) &~ 15UL)
126 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
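/*
 * On a downward-growing stack: STACK_ADD() reserves 'items' pointer-sized
 * slots below sp, STACK_ROUND() keeps the final stack pointer 16-byte
 * aligned, and STACK_ALLOC() carves 'len' bytes out of the argument area
 * and yields the new, lower address.
 */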
130 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
131 int interp_aout, unsigned long load_addr,
132 unsigned long interp_load_addr)
134 unsigned long p = bprm->p;
135 int argc = bprm->argc;
136 int envc = bprm->envc;
137 elf_addr_t __user *argv;
138 elf_addr_t __user *envp;
139 elf_addr_t __user *sp;
140 elf_addr_t __user *u_platform;
141 const char *k_platform = ELF_PLATFORM;
143 elf_addr_t *elf_info;
145 struct task_struct *tsk = current;
148 * If this architecture has a platform capability string, copy it
149 * to userspace. In some cases (Sparc), this info is impossible
150 * for userspace to get any other way, in others (i386) it is merely difficult.
156 size_t len = strlen(k_platform) + 1;
158 #ifdef __HAVE_ARCH_ALIGN_STACK
159 p = (unsigned long)arch_align_stack((unsigned long)p);
161 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
162 __copy_to_user(u_platform, k_platform, len);
165 /* Create the ELF interpreter info */
166 elf_info = (elf_addr_t *) current->mm->saved_auxv;
167 #define NEW_AUX_ENT(id, val) \
168 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
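/*
 * Each NEW_AUX_ENT() appends one { id, value } pair, so elf_info ends up
 * looking like, for example, { AT_HWCAP, hwcap }, { AT_PAGESZ, 4096 }, ...,
 * with the memset below guaranteeing an { AT_NULL, 0 } terminator. The
 * dynamic loader reads this auxiliary vector off the new stack at startup.
 */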
172 * ARCH_DLINFO must come first so PPC can do its special alignment of
177 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
178 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
179 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
180 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
181 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
182 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
183 NEW_AUX_ENT(AT_BASE, interp_load_addr);
184 NEW_AUX_ENT(AT_FLAGS, 0);
185 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
186 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
187 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
188 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
189 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
190 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
192 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
194 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
195 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
198 /* AT_NULL is zero; clear the rest too */
199 memset(&elf_info[ei_index], 0,
200 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
202 /* And advance past the AT_NULL entry. */
205 sp = STACK_ADD(p, ei_index);
207 items = (argc + 1) + (envc + 1);
209 items += 3; /* a.out interpreters require argv & envp too */
211 items += 1; /* ELF interpreters only put argc on the stack */
213 bprm->p = STACK_ROUND(sp, items);
215 /* Point sp at the lowest address on the stack */
216 #ifdef CONFIG_STACK_GROWSUP
217 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
218 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
220 sp = (elf_addr_t __user *)bprm->p;
223 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
224 __put_user(argc, sp++);
227 envp = argv + argc + 1;
228 __put_user((elf_addr_t)(long)argv, sp++);
229 __put_user((elf_addr_t)(long)envp, sp++);
232 envp = argv + argc + 1;
235 /* Populate argv and envp */
236 p = current->mm->arg_start;
239 __put_user((elf_addr_t)p, argv++);
240 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
241 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
246 current->mm->arg_end = current->mm->env_start = p;
249 __put_user((elf_addr_t)p, envp++);
250 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
251 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
256 current->mm->env_end = p;
258 /* Put the elf_info on the stack in the right place. */
259 sp = (elf_addr_t __user *)envp + 1;
260 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
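/*
 * Resulting initial stack for the usual grows-down, ELF-interpreter case
 * (from the final stack pointer upwards):
 *   argc
 *   argv[0] .. argv[argc-1], NULL
 *   envp[0] .. envp[envc-1], NULL
 *   auxv { id, value } pairs, terminated by AT_NULL
 *   argument/environment strings and the platform string above that
 */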
265 static unsigned long elf_map(struct file *filep, unsigned long addr,
266 struct elf_phdr *eppnt, int prot, int type,
267 unsigned long total_size)
269 unsigned long map_addr;
270 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
271 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
273 addr = ELF_PAGESTART(addr);
274 size = ELF_PAGEALIGN(size);
276 down_write(&current->mm->mmap_sem);
279 * total_size is the size of the ELF (interpreter) image.
280 * The _first_ mmap needs to know the full size, otherwise
281 * randomization might put this image into an overlapping
282 * position with the ELF binary image. (since size < total_size)
283 * So we first map the 'big' image - and unmap the remainder at
284 * the end (this unmap is needed for ELF images with holes).
287 total_size = ELF_PAGEALIGN(total_size);
288 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
289 if (!BAD_ADDR(map_addr))
290 do_munmap(current->mm, map_addr+size, total_size-size);
292 map_addr = do_mmap(filep, addr, size, prot, type, off);
294 up_write(&current->mm->mmap_sem);
299 #endif /* !elf_map */
301 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
303 int i, first_idx = -1, last_idx = -1;
305 for (i = 0; i < nr; i++)
306 if (cmds[i].p_type == PT_LOAD) {
315 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
316 ELF_PAGESTART(cmds[first_idx].p_vaddr);
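/*
 * Illustrative example: with PT_LOAD headers { p_vaddr 0x0000, p_memsz 0x5000 }
 * and { p_vaddr 0x6000, p_memsz 0x1234 }, this returns
 * 0x6000 + 0x1234 - 0x0000 == 0x7234, i.e. the span from the start of the
 * first loadable segment to the end of the last one.
 */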
319 /* This is much more generalized than the library routine read function,
320 so we keep this separate. Technically the library read function
321 is only provided so that we can read a.out libraries that have an ELF header. */
324 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
325 struct file * interpreter,
326 unsigned long *interp_load_addr,
327 unsigned long no_base)
329 struct elf_phdr *elf_phdata;
330 struct elf_phdr *eppnt;
331 unsigned long load_addr = 0;
332 int load_addr_set = 0;
333 unsigned long last_bss = 0, elf_bss = 0;
334 unsigned long error = ~0UL;
335 unsigned long total_size;
338 /* First of all, some simple consistency checks */
339 if (interp_elf_ex->e_type != ET_EXEC &&
340 interp_elf_ex->e_type != ET_DYN)
342 if (!elf_check_arch(interp_elf_ex))
344 if (!interpreter->f_op || !interpreter->f_op->mmap)
348 * If the size of this structure has changed, then punt, since
349 * we will be doing the wrong thing.
351 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
353 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
356 /* Now read in all of the header information */
358 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
359 if (size > ELF_MIN_ALIGN)
361 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
365 retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);
370 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
375 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
376 if (eppnt->p_type == PT_LOAD) {
377 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
379 unsigned long vaddr = 0;
380 unsigned long k, map_addr;
382 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
383 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
384 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
385 vaddr = eppnt->p_vaddr;
386 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
387 elf_type |= MAP_FIXED;
388 else if (no_base && interp_elf_ex->e_type == ET_DYN)
391 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
394 if (BAD_ADDR(map_addr))
397 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
398 load_addr = map_addr - ELF_PAGESTART(vaddr);
403 * Check to see if the section's size will overflow the
404 * allowed task size. Note that p_filesz must always be
405 * <= p_memsz so it is only necessary to check p_memsz.
407 k = load_addr + eppnt->p_vaddr;
408 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
409 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
415 * Find the end of the file mapping for this phdr, and keep
416 * track of the largest address we see for this.
418 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
423 * Do the same thing for the memory mapping - between
424 * elf_bss and last_bss is the bss section.
426 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
433 * Now fill out the bss section. First pad the last page up
434 * to the page boundary, and then perform a mmap to make sure
435 * that there are zero-mapped pages up to and including the
439 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
441 /* Map the last of the bss segment */
442 if (last_bss > elf_bss) {
443 error = do_brk(elf_bss, last_bss - elf_bss);
448 *interp_load_addr = load_addr;
449 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
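/*
 * Illustration of the bss handling above (assuming 4K pages): if the
 * interpreter's file-backed data ends at elf_bss == 0x40014200 and its
 * memory image ends at last_bss == 0x40016000, elf_bss is first rounded
 * up to 0x40015000 and do_brk() then supplies anonymous zero pages for
 * the remaining 0x1000 bytes.
 */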
457 static unsigned long load_aout_interp(struct exec * interp_ex,
458 struct file * interpreter)
460 unsigned long text_data, elf_entry = ~0UL;
464 current->mm->end_code = interp_ex->a_text;
465 text_data = interp_ex->a_text + interp_ex->a_data;
466 current->mm->end_data = text_data;
467 current->mm->brk = interp_ex->a_bss + text_data;
469 switch (N_MAGIC(*interp_ex)) {
472 addr = (char __user *)0;
476 offset = N_TXTOFF(*interp_ex);
477 addr = (char __user *) N_TXTADDR(*interp_ex);
483 do_brk(0, text_data);
484 if (!interpreter->f_op || !interpreter->f_op->read)
486 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
488 flush_icache_range((unsigned long)addr,
489 (unsigned long)addr + text_data);
491 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
493 elf_entry = interp_ex->a_entry;
500 * These are the functions used to load ELF style executables and shared
501 * libraries. There is no binary dependent code anywhere else.
504 #define INTERPRETER_NONE 0
505 #define INTERPRETER_AOUT 1
506 #define INTERPRETER_ELF 2
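/*
 * interpreter_type starts with both bits set and is narrowed later:
 * INTERPRETER_AOUT survives only if the PT_INTERP target carries a
 * recognized a.out magic, INTERPRETER_ELF only if its e_ident matches
 * ELFMAG; if both remain set, ELF wins.
 */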
509 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
511 struct file *interpreter = NULL; /* to shut gcc up */
512 unsigned long load_addr = 0, load_bias = 0;
513 int load_addr_set = 0;
514 char * elf_interpreter = NULL;
515 unsigned int interpreter_type = INTERPRETER_NONE;
516 unsigned char ibcs2_interpreter = 0;
518 struct elf_phdr * elf_ppnt, *elf_phdata;
519 unsigned long elf_bss, elf_brk;
523 unsigned long elf_entry, interp_load_addr = 0;
524 unsigned long start_code, end_code, start_data, end_data;
525 unsigned long reloc_func_desc = 0;
526 struct elfhdr elf_ex;
527 struct elfhdr interp_elf_ex;
528 struct exec interp_ex;
529 char passed_fileno[6];
530 struct files_struct *files;
531 int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
532 unsigned long def_flags = 0;
534 /* Get the exec-header */
535 elf_ex = *((struct elfhdr *) bprm->buf);
538 /* First of all, some simple consistency checks */
539 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
542 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
544 if (!elf_check_arch(&elf_ex))
546 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
549 /* Now read in all of the header information */
552 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
554 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
556 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
557 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
561 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
565 files = current->files; /* Refcounted so ok */
566 retval = unshare_files();
569 if (files == current->files) {
570 put_files_struct(files);
574 /* exec will make our files private anyway, but for the a.out
575 loader stuff we need to do it earlier */
577 retval = get_unused_fd();
580 get_file(bprm->file);
581 fd_install(elf_exec_fileno = retval, bprm->file);
583 elf_ppnt = elf_phdata;
592 for (i = 0; i < elf_ex.e_phnum; i++) {
593 if (elf_ppnt->p_type == PT_INTERP) {
594 /* This is the program interpreter used for
595 * shared libraries - for now assume that this
596 * is an a.out format binary
600 if (elf_ppnt->p_filesz > PATH_MAX)
602 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
604 if (!elf_interpreter)
607 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
611 goto out_free_interp;
612 /* If the program interpreter is one of these two,
613 * then assume an iBCS2 image. Otherwise assume
614 * a native linux image.
616 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
617 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
618 ibcs2_interpreter = 1;
621 * The early SET_PERSONALITY here is so that the lookup
622 * for the interpreter happens in the namespace of the
623 * to-be-execed image. SET_PERSONALITY can select an
626 * However, SET_PERSONALITY is NOT allowed to switch
627 * this task into the new image's memory mapping
628 * policy - that is, TASK_SIZE must still evaluate to
629 * that which is appropriate to the execing application.
630 * This is because exit_mmap() needs to have TASK_SIZE
631 * evaluate to the size of the old image.
633 * So if (say) a 64-bit application is execing a 32-bit
634 * application it is the architecture's responsibility
635 * to defer changing the value of TASK_SIZE until the
636 * switch really is going to happen - do this in
637 * flush_thread(). - akpm
639 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
641 interpreter = open_exec(elf_interpreter);
642 retval = PTR_ERR(interpreter);
643 if (IS_ERR(interpreter))
644 goto out_free_interp;
645 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
647 goto out_free_dentry;
649 /* Get the exec headers */
650 interp_ex = *((struct exec *) bprm->buf);
651 interp_elf_ex = *((struct elfhdr *) bprm->buf);
657 elf_ppnt = elf_phdata;
658 executable_stack = EXSTACK_DEFAULT;
660 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
661 if (elf_ppnt->p_type == PT_GNU_STACK) {
662 if (elf_ppnt->p_flags & PF_X)
663 executable_stack = EXSTACK_ENABLE_X;
665 executable_stack = EXSTACK_DISABLE_X;
668 if (i == elf_ex.e_phnum)
669 def_flags |= VM_EXEC | VM_MAYEXEC;
673 if (current->personality == PER_LINUX)
674 switch (exec_shield) {
676 if (executable_stack != EXSTACK_DEFAULT) {
677 current->flags |= PF_RELOCEXEC;
678 relocexec = PF_RELOCEXEC;
683 executable_stack = EXSTACK_DISABLE_X;
684 current->flags |= PF_RELOCEXEC;
685 relocexec = PF_RELOCEXEC;
689 /* Some simple consistency checks for the interpreter */
690 if (elf_interpreter) {
691 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
693 /* Now figure out which format our binary is */
694 if ((N_MAGIC(interp_ex) != OMAGIC) &&
695 (N_MAGIC(interp_ex) != ZMAGIC) &&
696 (N_MAGIC(interp_ex) != QMAGIC))
697 interpreter_type = INTERPRETER_ELF;
699 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
700 interpreter_type &= ~INTERPRETER_ELF;
703 if (!interpreter_type)
704 goto out_free_dentry;
706 /* Make sure only one type was selected */
707 if ((interpreter_type & INTERPRETER_ELF) &&
708 interpreter_type != INTERPRETER_ELF) {
709 // FIXME - ratelimit this before re-enabling
710 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
711 interpreter_type = INTERPRETER_ELF;
713 /* Verify the interpreter has a valid arch */
714 if ((interpreter_type == INTERPRETER_ELF) &&
715 !elf_check_arch(&interp_elf_ex))
716 goto out_free_dentry;
718 /* Executables without an interpreter also need a personality */
719 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
722 /* OK, we are done with that, now set up the arg stuff,
723 and then start this sucker up */
725 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
726 char *passed_p = passed_fileno;
727 sprintf(passed_fileno, "%d", elf_exec_fileno);
729 if (elf_interpreter) {
730 retval = copy_strings_kernel(1, &passed_p, bprm);
732 goto out_free_dentry;
737 /* Flush all traces of the currently running executable */
738 retval = flush_old_exec(bprm);
740 goto out_free_dentry;
741 current->flags |= relocexec;
745 * Turn off the CS limit completely if exec-shield disabled or
749 arch_add_exec_range(current->mm, -1);
752 /* Discard our unneeded old files struct */
755 put_files_struct(files);
759 /* OK, This is the point of no return */
760 current->mm->start_data = 0;
761 current->mm->end_data = 0;
762 current->mm->end_code = 0;
763 current->mm->mmap = NULL;
764 #ifdef __HAVE_ARCH_MMAP_TOP
765 current->mm->mmap_top = mmap_top();
767 current->flags &= ~PF_FORKNOEXEC;
768 current->mm->def_flags = def_flags;
770 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
771 may depend on the personality. */
772 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
774 /* Do this so that we can load the interpreter, if need be. We will
775 change some of these later */
776 // current->mm->rss = 0;
777 vx_rsspages_sub(current->mm, current->mm->rss);
778 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
779 current->mm->non_executable_cache = current->mm->mmap_top;
780 retval = setup_arg_pages(bprm, executable_stack);
782 send_sig(SIGKILL, current, 0);
783 goto out_free_dentry;
786 current->mm->start_stack = bprm->p;
789 /* Now we do a little grungy work by mmapping the ELF image into
790 the correct location in memory.
793 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
794 int elf_prot = 0, elf_flags;
795 unsigned long k, vaddr;
797 if (elf_ppnt->p_type != PT_LOAD)
800 if (unlikely (elf_brk > elf_bss)) {
803 /* There was a PT_LOAD segment with p_memsz > p_filesz
804 before this one. Map anonymous pages, if needed,
805 and clear the area. */
806 retval = set_brk (elf_bss + load_bias,
807 elf_brk + load_bias);
809 send_sig(SIGKILL, current, 0);
810 goto out_free_dentry;
812 nbyte = ELF_PAGEOFFSET(elf_bss);
814 nbyte = ELF_MIN_ALIGN - nbyte;
815 if (nbyte > elf_brk - elf_bss)
816 nbyte = elf_brk - elf_bss;
817 clear_user((void __user *) elf_bss + load_bias, nbyte);
821 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
822 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
823 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
825 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
827 vaddr = elf_ppnt->p_vaddr;
828 if (elf_ex.e_type == ET_EXEC || load_addr_set)
829 elf_flags |= MAP_FIXED;
830 else if (elf_ex.e_type == ET_DYN)
834 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
837 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
841 if (!load_addr_set) {
843 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
844 if (elf_ex.e_type == ET_DYN) {
846 ELF_PAGESTART(load_bias + vaddr);
847 load_addr += load_bias;
848 reloc_func_desc = load_bias;
851 k = elf_ppnt->p_vaddr;
852 if (k < start_code) start_code = k;
853 if (start_data < k) start_data = k;
856 * Check to see if the section's size will overflow the
857 * allowed task size. Note that p_filesz must always be
858 * <= p_memsz so it is only necessary to check p_memsz.
860 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
861 elf_ppnt->p_memsz > TASK_SIZE ||
862 TASK_SIZE - elf_ppnt->p_memsz < k) {
863 /* set_brk can never work. Avoid overflows. */
864 send_sig(SIGKILL, current, 0);
865 goto out_free_dentry;
868 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
872 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
876 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
881 elf_ex.e_entry += load_bias;
882 elf_bss += load_bias;
883 elf_brk += load_bias;
884 start_code += load_bias;
885 end_code += load_bias;
886 start_data += load_bias;
887 end_data += load_bias;
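/*
 * For ET_EXEC binaries load_bias stays zero and the values above are the
 * file's own absolute addresses; for ET_DYN binaries everything (entry
 * point, bss, brk, code/data bounds) has just been shifted by the base
 * address chosen when the first segment was mapped.
 */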
889 /* Calling set_brk effectively mmaps the pages that we need
890 * for the bss and break sections. We must do this before
891 * mapping in the interpreter, to make sure it doesn't wind
892 * up getting placed where the bss needs to go.
894 retval = set_brk(elf_bss, elf_brk);
896 send_sig(SIGKILL, current, 0);
897 goto out_free_dentry;
901 if (elf_interpreter) {
902 if (interpreter_type == INTERPRETER_AOUT)
903 elf_entry = load_aout_interp(&interp_ex,
906 elf_entry = load_elf_interp(&interp_elf_ex,
910 if (BAD_ADDR(elf_entry)) {
911 printk(KERN_ERR "Unable to load interpreter\n");
912 send_sig(SIGSEGV, current, 0);
913 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
914 goto out_free_dentry;
916 reloc_func_desc = interp_load_addr;
918 allow_write_access(interpreter);
920 kfree(elf_interpreter);
922 elf_entry = elf_ex.e_entry;
927 if (interpreter_type != INTERPRETER_AOUT)
928 sys_close(elf_exec_fileno);
930 set_binfmt(&elf_format);
933 * Map the vsyscall trampoline. This address is then passed via
936 #ifdef __HAVE_ARCH_VSYSCALL
941 current->flags &= ~PF_FORKNOEXEC;
942 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
943 load_addr, interp_load_addr);
944 /* N.B. passed_fileno might not be initialized? */
945 if (interpreter_type == INTERPRETER_AOUT)
946 current->mm->arg_start += strlen(passed_fileno) + 1;
947 current->mm->end_code = end_code;
948 current->mm->start_code = start_code;
949 current->mm->start_data = start_data;
950 current->mm->end_data = end_data;
951 current->mm->start_stack = bprm->p;
953 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
954 if (current->flags & PF_RELOCEXEC)
955 randomize_brk(elf_brk);
957 if (current->personality & MMAP_PAGE_ZERO) {
958 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
959 and some applications "depend" upon this behavior.
960 Since we do not have the power to recompile these, we
961 emulate the SVr4 behavior. Sigh. */
962 down_write(&current->mm->mmap_sem);
963 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
964 MAP_FIXED | MAP_PRIVATE, 0);
965 up_write(&current->mm->mmap_sem);
970 * The ABI may specify that certain registers be set up in special
971 * ways (on i386 %edx is the address of a DT_FINI function, for
972 * example). In addition, it may also specify (eg, PowerPC64 ELF)
973 * that the e_entry field is the address of the function descriptor
974 * for the startup routine, rather than the address of the startup
975 * routine itself. This macro performs whatever initialization to
976 * the regs structure is required as well as any relocations to the
977 * function descriptor entries when executing dynamically linked apps.
979 ELF_PLAT_INIT(regs, reloc_func_desc);
982 start_thread(regs, elf_entry, bprm->p);
983 if (unlikely(current->ptrace & PT_PTRACED)) {
984 if (current->ptrace & PT_TRACE_EXEC)
985 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
987 send_sig(SIGTRAP, current, 0);
995 allow_write_access(interpreter);
1000 kfree(elf_interpreter);
1002 sys_close(elf_exec_fileno);
1005 put_files_struct(current->files);
1006 current->files = files;
1010 current->flags &= ~PF_RELOCEXEC;
1011 current->flags |= old_relocexec;
1015 /* This is really simpleminded and specialized - we are loading an
1016 a.out library that is given an ELF header. */
1018 static int load_elf_library(struct file *file)
1020 struct elf_phdr *elf_phdata;
1021 unsigned long elf_bss, bss, len;
1022 int retval, error, i, j;
1023 struct elfhdr elf_ex;
1026 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1027 if (retval != sizeof(elf_ex))
1030 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1033 /* First of all, some simple consistency checks */
1034 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1035 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1038 /* Now read in all of the header information */
1040 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1041 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1044 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1049 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1053 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1054 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1058 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1060 /* Now use mmap to map the library into memory. */
1061 down_write(&current->mm->mmap_sem);
1062 error = do_mmap(file,
1063 ELF_PAGESTART(elf_phdata->p_vaddr),
1064 (elf_phdata->p_filesz +
1065 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1066 PROT_READ | PROT_WRITE | PROT_EXEC,
1067 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1068 (elf_phdata->p_offset -
1069 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1070 up_write(&current->mm->mmap_sem);
1071 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1074 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1077 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1078 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1080 do_brk(len, bss - len);
1090 * Note that some platforms still use traditional core dumps and not
1091 * the ELF core dump. Each platform can select it as appropriate.
1093 #ifdef USE_ELF_CORE_DUMP
1098 * Modelled on fs/exec.c:aout_core_dump()
1099 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1102 * These are the only things you should do on a core-file: use only these
1103 * functions to write out all the necessary info.
1105 static int dump_write(struct file *file, const void *addr, int nr)
1107 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1110 static int dump_seek(struct file *file, off_t off)
1112 if (file->f_op->llseek) {
1113 if (file->f_op->llseek(file, off, 0) != off)
1121 * Decide whether a segment is worth dumping; default is yes to be
1122 * sure (missing info is worse than too much; etc).
1123 * Personally I'd include everything, and use the coredump limit...
1125 * I think we should skip something. But I am not sure how. H.J.
1127 static int maydump(struct vm_area_struct *vma)
1130 * If we may not read the contents, don't allow us to dump
1131 * them either. "dump_write()" can't handle it anyway.
1133 if (!(vma->vm_flags & VM_READ))
1136 /* Do not dump I/O mapped devices! -DaveM */
1137 if (vma->vm_flags & VM_IO)
1140 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1142 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1148 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
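/* For example, roundup(13, 4) == 16 and roundup(16, 4) == 16 (integer math). */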
1150 /* An ELF note in memory */
1155 unsigned int datasz;
1159 static int notesize(struct memelfnote *en)
1163 sz = sizeof(struct elf_note);
1164 sz += roundup(strlen(en->name) + 1, 4);
1165 sz += roundup(en->datasz, 4);
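/*
 * Example: for a "CORE" note the owner name costs roundup(5, 4) == 8 bytes
 * on top of the fixed note header and the 4-byte-padded payload.
 */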
1170 #define DUMP_WRITE(addr, nr) \
1171 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1172 #define DUMP_SEEK(off) \
1173 do { if (!dump_seek(file, (off))) return 0; } while(0)
1175 static int writenote(struct memelfnote *men, struct file *file)
1179 en.n_namesz = strlen(men->name) + 1;
1180 en.n_descsz = men->datasz;
1181 en.n_type = men->type;
1183 DUMP_WRITE(&en, sizeof(en));
1184 DUMP_WRITE(men->name, en.n_namesz);
1185 /* XXX - cast from long long to long to avoid need for libgcc.a */
1186 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1187 DUMP_WRITE(men->data, men->datasz);
1188 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1195 #define DUMP_WRITE(addr, nr) \
1196 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1198 #define DUMP_SEEK(off) \
1199 if (!dump_seek(file, (off))) \
1202 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1204 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1205 elf->e_ident[EI_CLASS] = ELF_CLASS;
1206 elf->e_ident[EI_DATA] = ELF_DATA;
1207 elf->e_ident[EI_VERSION] = EV_CURRENT;
1208 elf->e_ident[EI_OSABI] = ELF_OSABI;
1209 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1211 elf->e_type = ET_CORE;
1212 elf->e_machine = ELF_ARCH;
1213 elf->e_version = EV_CURRENT;
1215 elf->e_phoff = sizeof(struct elfhdr);
1218 elf->e_ehsize = sizeof(struct elfhdr);
1219 elf->e_phentsize = sizeof(struct elf_phdr);
1220 elf->e_phnum = segs;
1221 elf->e_shentsize = 0;
1223 elf->e_shstrndx = 0;
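/*
 * The resulting header describes an ET_CORE image whose program headers
 * start immediately after the ELF header; no section header table is emitted.
 */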
1227 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1229 phdr->p_type = PT_NOTE;
1230 phdr->p_offset = offset;
1233 phdr->p_filesz = sz;
1240 static void fill_note(struct memelfnote *note, const char *name, int type,
1241 unsigned int sz, void *data)
1251 * fill up all the fields in prstatus from the given task struct, except registers
1252 * which need to be filled up separately.
1254 static void fill_prstatus(struct elf_prstatus *prstatus,
1255 struct task_struct *p, long signr)
1257 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1258 prstatus->pr_sigpend = p->pending.signal.sig[0];
1259 prstatus->pr_sighold = p->blocked.sig[0];
1260 prstatus->pr_pid = p->pid;
1261 prstatus->pr_ppid = p->parent->pid;
1262 prstatus->pr_pgrp = process_group(p);
1263 prstatus->pr_sid = p->signal->session;
1264 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1265 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1266 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1267 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
1270 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1271 struct mm_struct *mm)
1275 /* first copy the parameters from user space */
1276 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1278 len = mm->arg_end - mm->arg_start;
1279 if (len >= ELF_PRARGSZ)
1280 len = ELF_PRARGSZ-1;
1281 copy_from_user(&psinfo->pr_psargs,
1282 (const char __user *)mm->arg_start, len);
1283 for(i = 0; i < len; i++)
1284 if (psinfo->pr_psargs[i] == 0)
1285 psinfo->pr_psargs[i] = ' ';
1286 psinfo->pr_psargs[len] = 0;
1288 psinfo->pr_pid = p->pid;
1289 psinfo->pr_ppid = p->parent->pid;
1290 psinfo->pr_pgrp = process_group(p);
1291 psinfo->pr_sid = p->signal->session;
1293 i = p->state ? ffz(~p->state) + 1 : 0;
1294 psinfo->pr_state = i;
1295 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1296 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1297 psinfo->pr_nice = task_nice(p);
1298 psinfo->pr_flag = p->flags;
1299 SET_UID(psinfo->pr_uid, p->uid);
1300 SET_GID(psinfo->pr_gid, p->gid);
1301 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
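/*
 * pr_sname is taken from "RSDTZW", the single-letter state codes also used
 * by ps(1) ('R' running, 'S' sleeping, 'D' uninterruptible, 'T' stopped,
 * 'Z' zombie), with '.' for anything out of range.
 */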
1306 /* Here is the structure in which status of each thread is captured. */
1307 struct elf_thread_status
1309 struct list_head list;
1310 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1311 elf_fpregset_t fpu; /* NT_PRFPREG */
1312 #ifdef ELF_CORE_COPY_XFPREGS
1313 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1315 struct memelfnote notes[3];
1320 * In order to add the specific thread information for the elf file format,
1321 * we need to keep a linked list of every thread's pr_status and then
1322 * create a single section for them in the final core file.
1324 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1327 struct elf_thread_status *t;
1330 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1333 memset(t, 0, sizeof(*t));
1335 INIT_LIST_HEAD(&t->list);
1338 fill_prstatus(&t->prstatus, p, signr);
1339 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1341 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1343 sz += notesize(&t->notes[0]);
1345 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1346 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1348 sz += notesize(&t->notes[1]);
1351 #ifdef ELF_CORE_COPY_XFPREGS
1352 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1353 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1355 sz += notesize(&t->notes[2]);
1358 list_add(&t->list, thread_list);
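/*
 * The per-thread notes gathered here (NT_PRSTATUS plus optional FPU and
 * extended-FPU notes) are emitted later, after the dumping thread's own
 * notes, so every thread of the process appears in the NOTE segment.
 */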
1365 * This is a two-pass process; first we find the offsets of the bits,
1366 * and then they are actually written out. If we run out of core limit we just truncate.
1369 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1377 struct vm_area_struct *vma;
1378 struct elfhdr *elf = NULL;
1379 off_t offset = 0, dataoff;
1380 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1382 struct memelfnote *notes = NULL;
1383 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1384 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1385 struct task_struct *g, *p;
1386 LIST_HEAD(thread_list);
1387 struct list_head *t;
1388 elf_fpregset_t *fpu = NULL;
1389 #ifdef ELF_CORE_COPY_XFPREGS
1390 elf_fpxregset_t *xfpu = NULL;
1392 int thread_status_size = 0;
1396 * We no longer stop all VM operations.
1398 * This is because those processes that could possibly change map_count or
1399 * the mmap / vma pages are now blocked in do_exit on current finishing this core dump.
1402 * Only ptrace can touch these memory addresses, but it doesn't change
1403 * the map_count or the pages allocated. So no possibility of crashing
1404 * exists while dumping the mm->vm_next areas to the core file.
1407 /* alloc memory for large data structures: too large to be on stack */
1408 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1411 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1414 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1417 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1420 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1423 #ifdef ELF_CORE_COPY_XFPREGS
1424 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1429 /* capture the status of all other threads */
1431 read_lock(&tasklist_lock);
1433 if (current->mm == p->mm && current != p) {
1434 int sz = elf_dump_thread_status(signr, p, &thread_list);
1436 read_unlock(&tasklist_lock);
1439 thread_status_size += sz;
1441 while_each_thread(g,p);
1442 read_unlock(&tasklist_lock);
1445 /* now collect the dump for the current */
1446 memset(prstatus, 0, sizeof(*prstatus));
1447 fill_prstatus(prstatus, current, signr);
1448 elf_core_copy_regs(&prstatus->pr_reg, regs);
1450 segs = current->mm->map_count;
1451 #ifdef ELF_CORE_EXTRA_PHDRS
1452 segs += ELF_CORE_EXTRA_PHDRS;
1456 fill_elf_header(elf, segs+1); /* including notes section */
1459 current->flags |= PF_DUMPCORE;
1462 * Set up the notes in similar form to SVR4 core dumps made
1463 * with info from their /proc.
1466 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1468 fill_psinfo(psinfo, current->group_leader, current->mm);
1469 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1471 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1475 auxv = (elf_addr_t *) current->mm->saved_auxv;
1480 while (auxv[i - 2] != AT_NULL);
1481 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1482 i * sizeof (elf_addr_t), auxv);
1484 /* Try to dump the FPU. */
1485 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1486 fill_note(notes + numnote++,
1487 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1488 #ifdef ELF_CORE_COPY_XFPREGS
1489 if (elf_core_copy_task_xfpregs(current, xfpu))
1490 fill_note(notes + numnote++,
1491 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1497 DUMP_WRITE(elf, sizeof(*elf));
1498 offset += sizeof(*elf); /* Elf header */
1499 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1501 /* Write notes phdr entry */
1503 struct elf_phdr phdr;
1506 for (i = 0; i < numnote; i++)
1507 sz += notesize(notes + i);
1509 sz += thread_status_size;
1511 fill_elf_note_phdr(&phdr, sz, offset);
1513 DUMP_WRITE(&phdr, sizeof(phdr));
1516 /* Page-align dumped data */
1517 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
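/*
 * Overall core file layout built below: ELF header, program headers
 * (one PT_NOTE plus a PT_LOAD per vma, plus any arch extras), the note
 * data itself, then the page-aligned contents of each dumpable vma
 * starting at dataoff.
 */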
1519 /* Write program headers for segments dump */
1520 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1521 struct elf_phdr phdr;
1524 sz = vma->vm_end - vma->vm_start;
1526 phdr.p_type = PT_LOAD;
1527 phdr.p_offset = offset;
1528 phdr.p_vaddr = vma->vm_start;
1530 phdr.p_filesz = maydump(vma) ? sz : 0;
1532 offset += phdr.p_filesz;
1533 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1534 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1535 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1536 phdr.p_align = ELF_EXEC_PAGESIZE;
1538 DUMP_WRITE(&phdr, sizeof(phdr));
1541 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1542 ELF_CORE_WRITE_EXTRA_PHDRS;
1545 /* write out the notes section */
1546 for (i = 0; i < numnote; i++)
1547 if (!writenote(notes + i, file))
1550 /* write out the thread status notes section */
1551 list_for_each(t, &thread_list) {
1552 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1553 for (i = 0; i < tmp->num_notes; i++)
1554 if (!writenote(&tmp->notes[i], file))
1560 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1566 for (addr = vma->vm_start;
1568 addr += PAGE_SIZE) {
1570 struct vm_area_struct *vma;
1572 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1573 &page, &vma) <= 0) {
1574 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1576 if (page == ZERO_PAGE(addr)) {
1577 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1580 flush_cache_page(vma, addr);
1582 if ((size += PAGE_SIZE) > limit ||
1583 !dump_write(file, kaddr,
1586 page_cache_release(page);
1591 page_cache_release(page);
1596 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1597 ELF_CORE_WRITE_EXTRA_DATA;
1600 if ((off_t) file->f_pos != offset) {
1602 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1603 (off_t) file->f_pos, offset);
1610 while(!list_empty(&thread_list)) {
1611 struct list_head *tmp = thread_list.next;
1613 kfree(list_entry(tmp, struct elf_thread_status, list));
1621 #ifdef ELF_CORE_COPY_XFPREGS
1628 #endif /* USE_ELF_CORE_DUMP */
1630 static int __init init_elf_binfmt(void)
1632 return register_binfmt(&elf_format);
1635 static void __exit exit_elf_binfmt(void)
1637 /* Remove the ELF loader. */
1638 unregister_binfmt(&elf_format);
1641 core_initcall(init_elf_binfmt);
1642 module_exit(exit_elf_binfmt);
1643 MODULE_LICENSE("GPL");