2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/vs_memory.h>
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
45 #include <linux/elf.h>
47 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
48 static int load_elf_library(struct file*);
49 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
50 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
53 #define elf_addr_t unsigned long
57 * If we don't support core dumping, then supply a NULL so we
60 #ifdef USE_ELF_CORE_DUMP
61 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
63 #define elf_core_dump NULL
66 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
67 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
69 # define ELF_MIN_ALIGN PAGE_SIZE
72 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
73 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
74 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
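/*
 * Illustrative note (editorial, not in the original source): with
 * ELF_MIN_ALIGN of 0x1000, a segment with p_vaddr 0x08048154 gives
 *   ELF_PAGESTART(0x08048154)  == 0x08048000
 *   ELF_PAGEOFFSET(0x08048154) == 0x154
 *   ELF_PAGEALIGN(0x08048154)  == 0x08049000
 * i.e. mapping starts are rounded down to a page boundary, the lost low
 * bits are re-added as an in-page offset, and end addresses round up.
 */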
76 static struct linux_binfmt elf_format = {
77 .module = THIS_MODULE,
78 .load_binary = load_elf_binary,
79 .load_shlib = load_elf_library,
80 .core_dump = elf_core_dump,
81 .min_coredump = ELF_EXEC_PAGESIZE
84 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
86 static int set_brk(unsigned long start, unsigned long end)
88 start = ELF_PAGEALIGN(start);
89 end = ELF_PAGEALIGN(end);
91 unsigned long addr = do_brk(start, end - start);
95 current->mm->start_brk = current->mm->brk = end;
100 /* We need to explicitly zero any fractional pages
101 after the data section (i.e. bss). This would
102 contain the junk from the file that should not
106 static void padzero(unsigned long elf_bss)
110 nbyte = ELF_PAGEOFFSET(elf_bss);
112 nbyte = ELF_MIN_ALIGN - nbyte;
113 clear_user((void __user *) elf_bss, nbyte);
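/*
 * Worked example (editorial addition): if elf_bss is 0x0804a123 and
 * ELF_MIN_ALIGN is 0x1000, ELF_PAGEOFFSET() yields 0x123, so the
 * remaining 0x1000 - 0x123 = 0xedd bytes of that page are cleared,
 * wiping whatever file data was mapped past the end of the data segment.
 */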
117 /* Let's use some macros to make this stack manipulation a little clearer */
118 #ifdef CONFIG_STACK_GROWSUP
119 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
120 #define STACK_ROUND(sp, items) \
121 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
122 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
124 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
125 #define STACK_ROUND(sp, items) \
126 (((unsigned long) (sp - items)) &~ 15UL)
127 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
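/*
 * Editorial sketch of how these macros behave on a downward-growing stack
 * (the common case): STACK_ALLOC(sp, len) moves sp down by len and returns
 * the new, lower address; STACK_ADD(sp, items) reserves 'items' word-sized
 * slots below sp; STACK_ROUND() then aligns the result down to a 16-byte
 * boundary before it becomes the new bprm->p.
 */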
131 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
132 int interp_aout, unsigned long load_addr,
133 unsigned long interp_load_addr)
135 unsigned long p = bprm->p;
136 int argc = bprm->argc;
137 int envc = bprm->envc;
138 elf_addr_t __user *argv;
139 elf_addr_t __user *envp;
140 elf_addr_t __user *sp;
141 elf_addr_t __user *u_platform;
142 const char *k_platform = ELF_PLATFORM;
144 elf_addr_t *elf_info;
146 struct task_struct *tsk = current;
149 * If this architecture has a platform capability string, copy it
150 * to userspace. In some cases (Sparc), this info is impossible
151 * for userspace to get any other way, in others (i386) it is
157 size_t len = strlen(k_platform) + 1;
159 #ifdef __HAVE_ARCH_ALIGN_STACK
160 p = (unsigned long)arch_align_stack((unsigned long)p);
162 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
163 __copy_to_user(u_platform, k_platform, len);
166 /* Create the ELF interpreter info */
167 elf_info = (elf_addr_t *) current->mm->saved_auxv;
168 #define NEW_AUX_ENT(id, val) \
169 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
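/*
 * Editorial example of the expansion (hypothetical values): a call such as
 *   NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
 * appends the pair { AT_PAGESZ, 4096 } to current->mm->saved_auxv; the
 * accumulated pairs are later copied onto the new user stack just past the
 * envp pointers as the ELF auxiliary vector.
 */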
173 * ARCH_DLINFO must come first so PPC can do its special alignment of
178 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
179 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
180 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
181 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
182 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
183 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
184 NEW_AUX_ENT(AT_BASE, interp_load_addr);
185 NEW_AUX_ENT(AT_FLAGS, 0);
186 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
187 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
188 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
189 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
190 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
191 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
193 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
195 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
196 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
199 /* AT_NULL is zero; clear the rest too */
200 memset(&elf_info[ei_index], 0,
201 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
203 /* And advance past the AT_NULL entry. */
206 sp = STACK_ADD(p, ei_index);
208 items = (argc + 1) + (envc + 1);
210 items += 3; /* a.out interpreters require argv & envp too */
212 items += 1; /* ELF interpreters only put argc on the stack */
214 bprm->p = STACK_ROUND(sp, items);
216 /* Point sp at the lowest address on the stack */
217 #ifdef CONFIG_STACK_GROWSUP
218 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
219 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
221 sp = (elf_addr_t __user *)bprm->p;
224 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
225 __put_user(argc, sp++);
228 envp = argv + argc + 1;
229 __put_user((elf_addr_t)(long)argv, sp++);
230 __put_user((elf_addr_t)(long)envp, sp++);
233 envp = argv + argc + 1;
236 /* Populate argv and envp */
237 p = current->mm->arg_start;
240 __put_user((elf_addr_t)p, argv++);
241 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
242 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
247 current->mm->arg_end = current->mm->env_start = p;
250 __put_user((elf_addr_t)p, envp++);
251 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
252 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
257 current->mm->env_end = p;
259 /* Put the elf_info on the stack in the right place. */
260 sp = (elf_addr_t __user *)envp + 1;
261 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
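/*
 * Resulting initial stack layout, sketched here for reference (editorial
 * addition; lowest address first on a downward-growing stack):
 *
 *     argc
 *     argv[0] ... argv[argc-1], NULL
 *     envp[0] ... envp[n-1],    NULL
 *     auxv pairs (AT_*, value) ... AT_NULL
 *     argument, environment and platform strings
 */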
266 static unsigned long elf_map(struct file *filep, unsigned long addr,
267 struct elf_phdr *eppnt, int prot, int type,
268 unsigned long total_size)
270 unsigned long map_addr;
271 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
272 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
274 addr = ELF_PAGESTART(addr);
275 size = ELF_PAGEALIGN(size);
277 down_write(&current->mm->mmap_sem);
280 * total_size is the size of the ELF (interpreter) image.
281 * The _first_ mmap needs to know the full size, otherwise
282 * randomization might put this image into an overlapping
283 * position with the ELF binary image. (since size < total_size)
284 * So we first map the 'big' image - and unmap the remainder at
285 * the end. (which unmap is needed for ELF images with holes.)
288 total_size = ELF_PAGEALIGN(total_size);
289 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
290 if (!BAD_ADDR(map_addr))
291 do_munmap(current->mm, map_addr+size, total_size-size);
293 map_addr = do_mmap(filep, addr, size, prot, type, off);
295 up_write(&current->mm->mmap_sem);
300 #endif /* !elf_map */
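/*
 * Editorial note: total_mapping_size() below returns the span, in bytes,
 * from the page-aligned start of the first PT_LOAD header to the end of
 * the memory image of the last one, i.e. how much address space the whole
 * ELF object will occupy once mapped.
 */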
302 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
304 int i, first_idx = -1, last_idx = -1;
306 for (i = 0; i < nr; i++)
307 if (cmds[i].p_type == PT_LOAD) {
316 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
317 ELF_PAGESTART(cmds[first_idx].p_vaddr);
320 /* This is much more generalized than the library routine read function,
321 so we keep this separate. Technically the library read function
322 is only provided so that we can read a.out libraries that have
325 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
326 struct file * interpreter,
327 unsigned long *interp_load_addr,
328 unsigned long no_base)
330 struct elf_phdr *elf_phdata;
331 struct elf_phdr *eppnt;
332 unsigned long load_addr = 0;
333 int load_addr_set = 0;
334 unsigned long last_bss = 0, elf_bss = 0;
335 unsigned long error = ~0UL;
336 unsigned long total_size;
339 /* First of all, some simple consistency checks */
340 if (interp_elf_ex->e_type != ET_EXEC &&
341 interp_elf_ex->e_type != ET_DYN)
343 if (!elf_check_arch(interp_elf_ex))
345 if (!interpreter->f_op || !interpreter->f_op->mmap)
349 * If the size of this structure has changed, then punt, since
350 * we will be doing the wrong thing.
352 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
354 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
357 /* Now read in all of the header information */
359 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
360 if (size > ELF_MIN_ALIGN)
362 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
366 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
371 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
376 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
377 if (eppnt->p_type == PT_LOAD) {
378 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
380 unsigned long vaddr = 0;
381 unsigned long k, map_addr;
383 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
384 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
385 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
386 vaddr = eppnt->p_vaddr;
387 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
388 elf_type |= MAP_FIXED;
389 else if (no_base && interp_elf_ex->e_type == ET_DYN)
392 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
395 if (BAD_ADDR(map_addr))
398 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
399 load_addr = map_addr - ELF_PAGESTART(vaddr);
404 * Check to see if the section's size will overflow the
405 * allowed task size. Note that p_filesz must always be
406 * <= p_memsz so it is only necessary to check p_memsz.
408 k = load_addr + eppnt->p_vaddr;
409 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
410 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
416 * Find the end of the file mapping for this phdr, and keep
417 * track of the largest address we see for this.
419 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
424 * Do the same thing for the memory mapping - between
425 * elf_bss and last_bss is the bss section.
427 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
434 * Now fill out the bss section. First pad the last page up
435 * to the page boundary, and then perform a mmap to make sure
436 * that there are zero-mapped pages up to and including the
440 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
442 /* Map the last of the bss segment */
443 if (last_bss > elf_bss) {
444 error = do_brk(elf_bss, last_bss - elf_bss);
449 *interp_load_addr = load_addr;
450 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
458 static unsigned long load_aout_interp(struct exec * interp_ex,
459 struct file * interpreter)
461 unsigned long text_data, elf_entry = ~0UL;
465 current->mm->end_code = interp_ex->a_text;
466 text_data = interp_ex->a_text + interp_ex->a_data;
467 current->mm->end_data = text_data;
468 current->mm->brk = interp_ex->a_bss + text_data;
470 switch (N_MAGIC(*interp_ex)) {
473 addr = (char __user *)0;
477 offset = N_TXTOFF(*interp_ex);
478 addr = (char __user *) N_TXTADDR(*interp_ex);
484 do_brk(0, text_data);
485 if (!interpreter->f_op || !interpreter->f_op->read)
487 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
489 flush_icache_range((unsigned long)addr,
490 (unsigned long)addr + text_data);
492 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
494 elf_entry = interp_ex->a_entry;
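/*
 * Editorial note on the path above: for an a.out-format interpreter the
 * text and data are simply read() into a do_brk() allocation rather than
 * mmap()ed, and the entry point comes straight from the a.out header.
 */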
501 * These are the functions used to load ELF style executables and shared
502 * libraries. There is no binary dependent code anywhere else.
505 #define INTERPRETER_NONE 0
506 #define INTERPRETER_AOUT 1
507 #define INTERPRETER_ELF 2
510 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
512 struct file *interpreter = NULL; /* to shut gcc up */
513 unsigned long load_addr = 0, load_bias = 0;
514 int load_addr_set = 0;
515 char * elf_interpreter = NULL;
516 unsigned int interpreter_type = INTERPRETER_NONE;
517 unsigned char ibcs2_interpreter = 0;
519 struct elf_phdr * elf_ppnt, *elf_phdata;
520 unsigned long elf_bss, elf_brk;
524 unsigned long elf_entry, interp_load_addr = 0;
525 unsigned long start_code, end_code, start_data, end_data;
526 unsigned long reloc_func_desc = 0;
527 struct elfhdr elf_ex;
528 struct elfhdr interp_elf_ex;
529 struct exec interp_ex;
530 char passed_fileno[6];
531 struct files_struct *files;
532 int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
533 unsigned long def_flags = 0;
535 /* Get the exec-header */
536 elf_ex = *((struct elfhdr *) bprm->buf);
539 /* First of all, some simple consistency checks */
540 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
543 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
545 if (!elf_check_arch(&elf_ex))
547 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
550 /* Now read in all of the header information */
553 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
555 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
557 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
558 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
562 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
566 files = current->files; /* Refcounted so ok */
567 retval = unshare_files();
570 if (files == current->files) {
571 put_files_struct(files);
575 /* exec will make our files private anyway, but for the a.out
576 loader stuff we need to do it earlier */
578 retval = get_unused_fd();
581 get_file(bprm->file);
582 fd_install(elf_exec_fileno = retval, bprm->file);
584 elf_ppnt = elf_phdata;
593 for (i = 0; i < elf_ex.e_phnum; i++) {
594 if (elf_ppnt->p_type == PT_INTERP) {
595 /* This is the program interpreter used for
596 * shared libraries - for now assume that this
597 * is an a.out format binary
601 if (elf_ppnt->p_filesz > PATH_MAX)
603 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
605 if (!elf_interpreter)
608 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
612 goto out_free_interp;
613 /* If the program interpreter is one of these two,
614 * then assume an iBCS2 image. Otherwise assume
615 * a native linux image.
617 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
618 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
619 ibcs2_interpreter = 1;
622 * The early SET_PERSONALITY here is so that the lookup
623 * for the interpreter happens in the namespace of the
624 * to-be-execed image. SET_PERSONALITY can select an
627 * However, SET_PERSONALITY is NOT allowed to switch
628 * this task into the new image's memory mapping
629 * policy - that is, TASK_SIZE must still evaluate to
630 * that which is appropriate to the execing application.
631 * This is because exit_mmap() needs to have TASK_SIZE
632 * evaluate to the size of the old image.
634 * So if (say) a 64-bit application is execing a 32-bit
635 * application it is the architecture's responsibility
636 * to defer changing the value of TASK_SIZE until the
637 * switch really is going to happen - do this in
638 * flush_thread(). - akpm
640 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
642 interpreter = open_exec(elf_interpreter);
643 retval = PTR_ERR(interpreter);
644 if (IS_ERR(interpreter))
645 goto out_free_interp;
646 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
648 goto out_free_dentry;
650 /* Get the exec headers */
651 interp_ex = *((struct exec *) bprm->buf);
652 interp_elf_ex = *((struct elfhdr *) bprm->buf);
658 elf_ppnt = elf_phdata;
659 executable_stack = EXSTACK_DEFAULT;
661 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
662 if (elf_ppnt->p_type == PT_GNU_STACK) {
663 if (elf_ppnt->p_flags & PF_X)
664 executable_stack = EXSTACK_ENABLE_X;
666 executable_stack = EXSTACK_DISABLE_X;
669 if (i == elf_ex.e_phnum)
670 def_flags |= VM_EXEC | VM_MAYEXEC;
674 if (current->personality == PER_LINUX)
675 switch (exec_shield) {
677 if (executable_stack != EXSTACK_DEFAULT) {
678 current->flags |= PF_RELOCEXEC;
679 relocexec = PF_RELOCEXEC;
684 executable_stack = EXSTACK_DISABLE_X;
685 current->flags |= PF_RELOCEXEC;
686 relocexec = PF_RELOCEXEC;
690 /* Some simple consistency checks for the interpreter */
691 if (elf_interpreter) {
692 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
694 /* Now figure out which format our binary is */
695 if ((N_MAGIC(interp_ex) != OMAGIC) &&
696 (N_MAGIC(interp_ex) != ZMAGIC) &&
697 (N_MAGIC(interp_ex) != QMAGIC))
698 interpreter_type = INTERPRETER_ELF;
700 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
701 interpreter_type &= ~INTERPRETER_ELF;
704 if (!interpreter_type)
705 goto out_free_dentry;
707 /* Make sure only one type was selected */
708 if ((interpreter_type & INTERPRETER_ELF) &&
709 interpreter_type != INTERPRETER_ELF) {
710 // FIXME - ratelimit this before re-enabling
711 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
712 interpreter_type = INTERPRETER_ELF;
714 /* Verify the interpreter has a valid arch */
715 if ((interpreter_type == INTERPRETER_ELF) &&
716 !elf_check_arch(&interp_elf_ex))
717 goto out_free_dentry;
719 /* Executables without an interpreter also need a personality */
720 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
723 /* OK, we are done with that, now set up the arg stuff,
724 and then start this sucker up */
726 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
727 char *passed_p = passed_fileno;
728 sprintf(passed_fileno, "%d", elf_exec_fileno);
730 if (elf_interpreter) {
731 retval = copy_strings_kernel(1, &passed_p, bprm);
733 goto out_free_dentry;
738 /* Flush all traces of the currently running executable */
739 retval = flush_old_exec(bprm);
741 goto out_free_dentry;
742 current->flags |= relocexec;
746 * Turn off the CS limit completely if exec-shield disabled or
750 arch_add_exec_range(current->mm, -1);
753 /* Discard our unneeded old files struct */
756 put_files_struct(files);
760 /* OK, This is the point of no return */
761 current->mm->start_data = 0;
762 current->mm->end_data = 0;
763 current->mm->end_code = 0;
764 current->mm->mmap = NULL;
765 #ifdef __HAVE_ARCH_MMAP_TOP
766 current->mm->mmap_top = mmap_top();
768 current->flags &= ~PF_FORKNOEXEC;
769 current->mm->def_flags = def_flags;
771 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
772 may depend on the personality. */
773 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
775 /* Do this so that we can load the interpreter, if need be. We will
776 change some of these later */
777 // current->mm->rss = 0;
778 vx_rsspages_sub(current->mm, current->mm->rss);
779 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
780 current->mm->non_executable_cache = current->mm->mmap_top;
781 retval = setup_arg_pages(bprm, executable_stack);
783 send_sig(SIGKILL, current, 0);
784 goto out_free_dentry;
787 current->mm->start_stack = bprm->p;
790 /* Now we do a little grungy work by mmaping the ELF image into
791 the correct location in memory.
794 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
795 int elf_prot = 0, elf_flags;
796 unsigned long k, vaddr;
798 if (elf_ppnt->p_type != PT_LOAD)
801 if (unlikely (elf_brk > elf_bss)) {
804 /* There was a PT_LOAD segment with p_memsz > p_filesz
805 before this one. Map anonymous pages, if needed,
806 and clear the area. */
807 retval = set_brk (elf_bss + load_bias,
808 elf_brk + load_bias);
810 send_sig(SIGKILL, current, 0);
811 goto out_free_dentry;
813 nbyte = ELF_PAGEOFFSET(elf_bss);
815 nbyte = ELF_MIN_ALIGN - nbyte;
816 if (nbyte > elf_brk - elf_bss)
817 nbyte = elf_brk - elf_bss;
818 clear_user((void __user *) elf_bss + load_bias, nbyte);
822 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
823 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
824 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
826 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
828 vaddr = elf_ppnt->p_vaddr;
829 if (elf_ex.e_type == ET_EXEC || load_addr_set)
830 elf_flags |= MAP_FIXED;
831 else if (elf_ex.e_type == ET_DYN)
835 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
838 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
842 if (!load_addr_set) {
844 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
845 if (elf_ex.e_type == ET_DYN) {
847 ELF_PAGESTART(load_bias + vaddr);
848 load_addr += load_bias;
849 reloc_func_desc = load_bias;
852 k = elf_ppnt->p_vaddr;
853 if (k < start_code) start_code = k;
854 if (start_data < k) start_data = k;
857 * Check to see if the section's size will overflow the
858 * allowed task size. Note that p_filesz must always be
859 * <= p_memsz so it is only necessary to check p_memsz.
861 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
862 elf_ppnt->p_memsz > TASK_SIZE ||
863 TASK_SIZE - elf_ppnt->p_memsz < k) {
864 /* set_brk can never work. Avoid overflows. */
865 send_sig(SIGKILL, current, 0);
866 goto out_free_dentry;
869 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
873 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
877 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
882 elf_ex.e_entry += load_bias;
883 elf_bss += load_bias;
884 elf_brk += load_bias;
885 start_code += load_bias;
886 end_code += load_bias;
887 start_data += load_bias;
888 end_data += load_bias;
890 /* Calling set_brk effectively mmaps the pages that we need
891 * for the bss and break sections. We must do this before
892 * mapping in the interpreter, to make sure it doesn't wind
893 * up getting placed where the bss needs to go.
895 retval = set_brk(elf_bss, elf_brk);
897 send_sig(SIGKILL, current, 0);
898 goto out_free_dentry;
902 if (elf_interpreter) {
903 if (interpreter_type == INTERPRETER_AOUT)
904 elf_entry = load_aout_interp(&interp_ex,
907 elf_entry = load_elf_interp(&interp_elf_ex,
911 if (BAD_ADDR(elf_entry)) {
912 printk(KERN_ERR "Unable to load interpreter\n");
913 send_sig(SIGSEGV, current, 0);
914 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
915 goto out_free_dentry;
917 reloc_func_desc = interp_load_addr;
919 allow_write_access(interpreter);
921 kfree(elf_interpreter);
923 elf_entry = elf_ex.e_entry;
928 if (interpreter_type != INTERPRETER_AOUT)
929 sys_close(elf_exec_fileno);
931 set_binfmt(&elf_format);
934 * Map the vsyscall trampoline. This address is then passed via
937 #ifdef __HAVE_ARCH_VSYSCALL
942 current->flags &= ~PF_FORKNOEXEC;
943 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
944 load_addr, interp_load_addr);
945 /* N.B. passed_fileno might not be initialized? */
946 if (interpreter_type == INTERPRETER_AOUT)
947 current->mm->arg_start += strlen(passed_fileno) + 1;
948 current->mm->end_code = end_code;
949 current->mm->start_code = start_code;
950 current->mm->start_data = start_data;
951 current->mm->end_data = end_data;
952 current->mm->start_stack = bprm->p;
954 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
955 if (current->flags & PF_RELOCEXEC)
956 randomize_brk(elf_brk);
958 if (current->personality & MMAP_PAGE_ZERO) {
959 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
960 and some applications "depend" upon this behavior.
961 Since we do not have the power to recompile these, we
962 emulate the SVr4 behavior. Sigh. */
963 down_write(&current->mm->mmap_sem);
964 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
965 MAP_FIXED | MAP_PRIVATE, 0);
966 up_write(&current->mm->mmap_sem);
971 * The ABI may specify that certain registers be set up in special
972 * ways (on i386 %edx is the address of a DT_FINI function, for
973 * example). In addition, it may also specify (eg, PowerPC64 ELF)
974 * that the e_entry field is the address of the function descriptor
975 * for the startup routine, rather than the address of the startup
976 * routine itself. This macro performs whatever initialization to
977 * the regs structure is required as well as any relocations to the
978 * function descriptor entries when executing dynamically linked apps.
980 ELF_PLAT_INIT(regs, reloc_func_desc);
983 start_thread(regs, elf_entry, bprm->p);
984 if (unlikely(current->ptrace & PT_PTRACED)) {
985 if (current->ptrace & PT_TRACE_EXEC)
986 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
988 send_sig(SIGTRAP, current, 0);
996 allow_write_access(interpreter);
1000 if (elf_interpreter)
1001 kfree(elf_interpreter);
1003 sys_close(elf_exec_fileno);
1006 put_files_struct(current->files);
1007 current->files = files;
1011 current->flags &= ~PF_RELOCEXEC;
1012 current->flags |= old_relocexec;
1016 /* This is really simpleminded and specialized - we are loading an
1017 a.out library that is given an ELF header. */
1019 static int load_elf_library(struct file *file)
1021 struct elf_phdr *elf_phdata;
1022 unsigned long elf_bss, bss, len;
1023 int retval, error, i, j;
1024 struct elfhdr elf_ex;
1027 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1028 if (retval != sizeof(elf_ex))
1031 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1034 /* First of all, some simple consistency checks */
1035 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1036 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1039 /* Now read in all of the header information */
1041 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1042 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1045 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1050 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1054 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1055 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1059 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1061 /* Now use mmap to map the library into memory. */
1062 down_write(&current->mm->mmap_sem);
1063 error = do_mmap(file,
1064 ELF_PAGESTART(elf_phdata->p_vaddr),
1065 (elf_phdata->p_filesz +
1066 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1067 PROT_READ | PROT_WRITE | PROT_EXEC,
1068 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1069 (elf_phdata->p_offset -
1070 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1071 up_write(&current->mm->mmap_sem);
1072 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1075 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1078 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1079 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1081 do_brk(len, bss - len);
1091 * Note that some platforms still use traditional core dumps and not
1092 * the ELF core dump. Each platform can select it as appropriate.
1094 #ifdef USE_ELF_CORE_DUMP
1099 * Modelled on fs/exec.c:aout_core_dump()
1100 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1103 * These are the only things you should do on a core-file: use only these
1104 * functions to write out all the necessary info.
1106 static int dump_write(struct file *file, const void *addr, int nr)
1108 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1111 static int dump_seek(struct file *file, off_t off)
1113 if (file->f_op->llseek) {
1114 if (file->f_op->llseek(file, off, 0) != off)
1122 * Decide whether a segment is worth dumping; default is yes to be
1123 * sure (missing info is worse than too much; etc).
1124 * Personally I'd include everything, and use the coredump limit...
1126 * I think we should skip something. But I am not sure how. H.J.
1128 static int maydump(struct vm_area_struct *vma)
1131 * If we may not read the contents, don't allow us to dump
1132 * them either. "dump_write()" can't handle it anyway.
1134 if (!(vma->vm_flags & VM_READ))
1137 /* Do not dump I/O mapped devices! -DaveM */
1138 if (vma->vm_flags & VM_IO)
1141 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1143 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1149 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
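/* Editorial example: roundup(5, 4) == 8 and roundup(8, 4) == 8; note name
   and descriptor lengths below are padded to 4-byte boundaries this way. */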
1151 /* An ELF note in memory */
1156 unsigned int datasz;
1160 static int notesize(struct memelfnote *en)
1164 sz = sizeof(struct elf_note);
1165 sz += roundup(strlen(en->name) + 1, 4);
1166 sz += roundup(en->datasz, 4);
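/*
 * Editorial example: a note named "CORE" (5 bytes including the NUL,
 * padded to 8) carrying a descriptor of, say, 144 bytes occupies
 * sizeof(struct elf_note) + 8 + 144 bytes in the core file.
 */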
1171 #define DUMP_WRITE(addr, nr) \
1172 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1173 #define DUMP_SEEK(off) \
1174 do { if (!dump_seek(file, (off))) return 0; } while(0)
1176 static int writenote(struct memelfnote *men, struct file *file)
1180 en.n_namesz = strlen(men->name) + 1;
1181 en.n_descsz = men->datasz;
1182 en.n_type = men->type;
1184 DUMP_WRITE(&en, sizeof(en));
1185 DUMP_WRITE(men->name, en.n_namesz);
1186 /* XXX - cast from long long to long to avoid need for libgcc.a */
1187 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1188 DUMP_WRITE(men->data, men->datasz);
1189 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1196 #define DUMP_WRITE(addr, nr) \
1197 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1199 #define DUMP_SEEK(off) \
1200 if (!dump_seek(file, (off))) \
1203 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1205 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1206 elf->e_ident[EI_CLASS] = ELF_CLASS;
1207 elf->e_ident[EI_DATA] = ELF_DATA;
1208 elf->e_ident[EI_VERSION] = EV_CURRENT;
1209 elf->e_ident[EI_OSABI] = ELF_OSABI;
1210 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1212 elf->e_type = ET_CORE;
1213 elf->e_machine = ELF_ARCH;
1214 elf->e_version = EV_CURRENT;
1216 elf->e_phoff = sizeof(struct elfhdr);
1219 elf->e_ehsize = sizeof(struct elfhdr);
1220 elf->e_phentsize = sizeof(struct elf_phdr);
1221 elf->e_phnum = segs;
1222 elf->e_shentsize = 0;
1224 elf->e_shstrndx = 0;
1228 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1230 phdr->p_type = PT_NOTE;
1231 phdr->p_offset = offset;
1234 phdr->p_filesz = sz;
1241 static void fill_note(struct memelfnote *note, const char *name, int type,
1242 unsigned int sz, void *data)
1252 * fill up all the fields in prstatus from the given task struct, except registers
1253 * which need to be filled up separately.
1255 static void fill_prstatus(struct elf_prstatus *prstatus,
1256 struct task_struct *p, long signr)
1258 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1259 prstatus->pr_sigpend = p->pending.signal.sig[0];
1260 prstatus->pr_sighold = p->blocked.sig[0];
1261 prstatus->pr_pid = p->pid;
1262 prstatus->pr_ppid = p->parent->pid;
1263 prstatus->pr_pgrp = process_group(p);
1264 prstatus->pr_sid = p->signal->session;
1265 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1266 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1267 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1268 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
1271 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1272 struct mm_struct *mm)
1276 /* first copy the parameters from user space */
1277 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1279 len = mm->arg_end - mm->arg_start;
1280 if (len >= ELF_PRARGSZ)
1281 len = ELF_PRARGSZ-1;
1282 copy_from_user(&psinfo->pr_psargs,
1283 (const char __user *)mm->arg_start, len);
1284 for(i = 0; i < len; i++)
1285 if (psinfo->pr_psargs[i] == 0)
1286 psinfo->pr_psargs[i] = ' ';
1287 psinfo->pr_psargs[len] = 0;
1289 psinfo->pr_pid = p->pid;
1290 psinfo->pr_ppid = p->parent->pid;
1291 psinfo->pr_pgrp = process_group(p);
1292 psinfo->pr_sid = p->signal->session;
1294 i = p->state ? ffz(~p->state) + 1 : 0;
1295 psinfo->pr_state = i;
1296 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1297 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1298 psinfo->pr_nice = task_nice(p);
1299 psinfo->pr_flag = p->flags;
1300 SET_UID(psinfo->pr_uid, p->uid);
1301 SET_GID(psinfo->pr_gid, p->gid);
1302 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1307 /* Here is the structure in which status of each thread is captured. */
1308 struct elf_thread_status
1310 struct list_head list;
1311 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1312 elf_fpregset_t fpu; /* NT_PRFPREG */
1313 #ifdef ELF_CORE_COPY_XFPREGS
1314 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1316 struct memelfnote notes[3];
1321 * In order to add the specific thread information for the elf file format,
1322 * we need to keep a linked list of every threads pr_status and then
1323 * create a single section for them in the final core file.
1325 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1328 struct elf_thread_status *t;
1331 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1334 memset(t, 0, sizeof(*t));
1336 INIT_LIST_HEAD(&t->list);
1339 fill_prstatus(&t->prstatus, p, signr);
1340 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1342 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1344 sz += notesize(&t->notes[0]);
1346 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1347 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1349 sz += notesize(&t->notes[1]);
1352 #ifdef ELF_CORE_COPY_XFPREGS
1353 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1354 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1356 sz += notesize(&t->notes[2]);
1359 list_add(&t->list, thread_list);
1366 * This is a two-pass process; first we find the offsets of the bits,
1367 * and then they are actually written out. If we run out of core limit
1370 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1378 struct vm_area_struct *vma;
1379 struct elfhdr *elf = NULL;
1380 off_t offset = 0, dataoff;
1381 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1383 struct memelfnote *notes = NULL;
1384 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1385 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1386 struct task_struct *g, *p;
1387 LIST_HEAD(thread_list);
1388 struct list_head *t;
1389 elf_fpregset_t *fpu = NULL;
1390 #ifdef ELF_CORE_COPY_XFPREGS
1391 elf_fpxregset_t *xfpu = NULL;
1393 int thread_status_size = 0;
1397 * We no longer stop all VM operations.
1399 * This is because those processes that could possibly change map_count or
1400 * the mmap / vma pages are now blocked in do_exit on current finishing
1403 * Only ptrace can touch these memory addresses, but it doesn't change
1404 * the map_count or the pages allocated. So no possibility of crashing
1405 * exists while dumping the mm->vm_next areas to the core file.
1408 /* alloc memory for large data structures: too large to be on stack */
1409 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1412 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1415 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1418 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1421 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1424 #ifdef ELF_CORE_COPY_XFPREGS
1425 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1430 /* capture the status of all other threads */
1432 read_lock(&tasklist_lock);
1434 if (current->mm == p->mm && current != p) {
1435 int sz = elf_dump_thread_status(signr, p, &thread_list);
1437 read_unlock(&tasklist_lock);
1440 thread_status_size += sz;
1442 while_each_thread(g,p);
1443 read_unlock(&tasklist_lock);
1446 /* now collect the dump for the current */
1447 memset(prstatus, 0, sizeof(*prstatus));
1448 fill_prstatus(prstatus, current, signr);
1449 elf_core_copy_regs(&prstatus->pr_reg, regs);
1451 segs = current->mm->map_count;
1452 #ifdef ELF_CORE_EXTRA_PHDRS
1453 segs += ELF_CORE_EXTRA_PHDRS;
1457 fill_elf_header(elf, segs+1); /* including notes section */
1460 current->flags |= PF_DUMPCORE;
1463 * Set up the notes in similar form to SVR4 core dumps made
1464 * with info from their /proc.
1467 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1469 fill_psinfo(psinfo, current->group_leader, current->mm);
1470 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1472 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1476 auxv = (elf_addr_t *) current->mm->saved_auxv;
1481 while (auxv[i - 2] != AT_NULL);
1482 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1483 i * sizeof (elf_addr_t), auxv);
1485 /* Try to dump the FPU. */
1486 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1487 fill_note(notes + numnote++,
1488 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1489 #ifdef ELF_CORE_COPY_XFPREGS
1490 if (elf_core_copy_task_xfpregs(current, xfpu))
1491 fill_note(notes + numnote++,
1492 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1498 DUMP_WRITE(elf, sizeof(*elf));
1499 offset += sizeof(*elf); /* Elf header */
1500 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
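/*
 * Editorial sketch of the core file layout being assembled here:
 *   [ELF header][phdr for the notes + one phdr per vma][notes]
 *   [padding to a page boundary][segment data for each dumpable vma]
 * 'offset' tracks where each piece will land in the file.
 */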
1502 /* Write notes phdr entry */
1504 struct elf_phdr phdr;
1507 for (i = 0; i < numnote; i++)
1508 sz += notesize(notes + i);
1510 sz += thread_status_size;
1512 fill_elf_note_phdr(&phdr, sz, offset);
1514 DUMP_WRITE(&phdr, sizeof(phdr));
1517 /* Page-align dumped data */
1518 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1520 /* Write program headers for segments dump */
1521 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1522 struct elf_phdr phdr;
1525 sz = vma->vm_end - vma->vm_start;
1527 phdr.p_type = PT_LOAD;
1528 phdr.p_offset = offset;
1529 phdr.p_vaddr = vma->vm_start;
1531 phdr.p_filesz = maydump(vma) ? sz : 0;
1533 offset += phdr.p_filesz;
1534 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1535 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1536 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1537 phdr.p_align = ELF_EXEC_PAGESIZE;
1539 DUMP_WRITE(&phdr, sizeof(phdr));
1542 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1543 ELF_CORE_WRITE_EXTRA_PHDRS;
1546 /* write out the notes section */
1547 for (i = 0; i < numnote; i++)
1548 if (!writenote(notes + i, file))
1551 /* write out the thread status notes section */
1552 list_for_each(t, &thread_list) {
1553 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1554 for (i = 0; i < tmp->num_notes; i++)
1555 if (!writenote(&tmp->notes[i], file))
1561 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1567 for (addr = vma->vm_start;
1569 addr += PAGE_SIZE) {
1571 struct vm_area_struct *vma;
1573 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1574 &page, &vma) <= 0) {
1575 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1577 if (page == ZERO_PAGE(addr)) {
1578 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1581 flush_cache_page(vma, addr);
1583 if ((size += PAGE_SIZE) > limit ||
1584 !dump_write(file, kaddr,
1587 page_cache_release(page);
1592 page_cache_release(page);
1597 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1598 ELF_CORE_WRITE_EXTRA_DATA;
1601 if ((off_t) file->f_pos != offset) {
1603 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1604 (off_t) file->f_pos, offset);
1611 while(!list_empty(&thread_list)) {
1612 struct list_head *tmp = thread_list.next;
1614 kfree(list_entry(tmp, struct elf_thread_status, list));
1622 #ifdef ELF_CORE_COPY_XFPREGS
1629 #endif /* USE_ELF_CORE_DUMP */
1631 static int __init init_elf_binfmt(void)
1633 return register_binfmt(&elf_format);
1636 static void __exit exit_elf_binfmt(void)
1638 /* Remove the COFF and ELF loaders. */
1639 unregister_binfmt(&elf_format);
1642 core_initcall(init_elf_binfmt);
1643 module_exit(exit_elf_binfmt);
1644 MODULE_LICENSE("GPL");