2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
41 #include <asm/uaccess.h>
42 #include <asm/param.h>
44 #include <linux/elf.h>
46 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
47 static int load_elf_library(struct file*);
48 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
49 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
52 #define elf_addr_t unsigned long
56 * If we don't support core dumping, then supply a NULL so we
59 #ifdef USE_ELF_CORE_DUMP
60 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
62 #define elf_core_dump NULL
65 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
68 # define ELF_MIN_ALIGN PAGE_SIZE
71 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
72 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
73 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
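/* For illustration, with ELF_MIN_ALIGN = 0x1000:
 *   ELF_PAGESTART(0x08048123)  == 0x08048000
 *   ELF_PAGEOFFSET(0x08048123) == 0x123
 *   ELF_PAGEALIGN(0x08048123)  == 0x08049000
 */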
75 static struct linux_binfmt elf_format = {
76 .module = THIS_MODULE,
77 .load_binary = load_elf_binary,
78 .load_shlib = load_elf_library,
79 .core_dump = elf_core_dump,
80 .min_coredump = ELF_EXEC_PAGESIZE
83 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
85 static int set_brk(unsigned long start, unsigned long end)
87 start = ELF_PAGEALIGN(start);
88 end = ELF_PAGEALIGN(end);
90 unsigned long addr = do_brk(start, end - start);
94 current->mm->start_brk = current->mm->brk = end;
99 /* We need to explicitly zero any fractional pages
100 after the data section (i.e. bss). This would
101 contain the junk from the file that should not
105 static void padzero(unsigned long elf_bss)
109 nbyte = ELF_PAGEOFFSET(elf_bss);
111 nbyte = ELF_MIN_ALIGN - nbyte;
112 clear_user((void __user *) elf_bss, nbyte);
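/* e.g. with ELF_MIN_ALIGN = 0x1000 and elf_bss = 0x0804a123, nbyte is
   0x1000 - 0x123 = 0xedd, zeroing the tail of the final data page up
   to the 0x0804b000 boundary. */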
116 /* Let's use some macros to make this stack manipulation a little clearer */
117 #ifdef CONFIG_STACK_GROWSUP
118 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
119 #define STACK_ROUND(sp, items) \
120 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
121 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
123 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
124 #define STACK_ROUND(sp, items) \
125 (((unsigned long) (sp - items)) &~ 15UL)
126 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
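/* On a downward-growing stack, for example, STACK_ALLOC(sp, 32) lowers
 * sp by 32 bytes and yields the new (lower) address, STACK_ADD(sp, 4)
 * points four elf_addr_t slots below sp, and STACK_ROUND() rounds the
 * result down to a 16-byte boundary. */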
130 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
131 int interp_aout, unsigned long load_addr,
132 unsigned long interp_load_addr)
134 unsigned long p = bprm->p;
135 int argc = bprm->argc;
136 int envc = bprm->envc;
137 elf_addr_t __user *argv;
138 elf_addr_t __user *envp;
139 elf_addr_t __user *sp;
140 elf_addr_t __user *u_platform;
141 const char *k_platform = ELF_PLATFORM;
143 elf_addr_t *elf_info;
145 struct task_struct *tsk = current;
148 * If this architecture has a platform capability string, copy it
149 * to userspace. In some cases (Sparc), this info is impossible
150 * for userspace to get any other way, in others (i386) it is
156 size_t len = strlen(k_platform) + 1;
158 #ifdef __HAVE_ARCH_ALIGN_STACK
159 p = (unsigned long)arch_align_stack((unsigned long)p);
161 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
162 __copy_to_user(u_platform, k_platform, len);
165 /* Create the ELF interpreter info */
166 elf_info = (elf_addr_t *) current->mm->saved_auxv;
167 #define NEW_AUX_ENT(id, val) \
168 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
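/* Each NEW_AUX_ENT() emits one auxiliary-vector entry as an (id, value)
 * pair of elf_addr_t words, e.g. NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE)
 * stores AT_PAGESZ followed by the page size; the table is terminated by
 * an all-zero AT_NULL entry (see the memset below). */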
172 * ARCH_DLINFO must come first so PPC can do its special alignment of
177 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
178 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
179 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
180 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
181 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
182 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
183 NEW_AUX_ENT(AT_BASE, interp_load_addr);
184 NEW_AUX_ENT(AT_FLAGS, 0);
185 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
186 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
187 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
188 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
189 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
190 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
192 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
194 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
195 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
198 /* AT_NULL is zero; clear the rest too */
199 memset(&elf_info[ei_index], 0,
200 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
202 /* And advance past the AT_NULL entry. */
205 sp = STACK_ADD(p, ei_index);
207 items = (argc + 1) + (envc + 1);
209 items += 3; /* a.out interpreters require argv & envp too */
211 items += 1; /* ELF interpreters only put argc on the stack */
213 bprm->p = STACK_ROUND(sp, items);
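/* Starting from the lowest address of the block laid out below, the
   stack will hold: argc, the argv[] pointer array plus its NULL
   terminator, the envp[] pointer array plus its NULL terminator, and
   finally the auxiliary vector copied from elf_info. */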
215 /* Point sp at the lowest address on the stack */
216 #ifdef CONFIG_STACK_GROWSUP
217 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
218 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
220 sp = (elf_addr_t __user *)bprm->p;
223 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
224 __put_user(argc, sp++);
227 envp = argv + argc + 1;
228 __put_user((elf_addr_t)(long)argv, sp++);
229 __put_user((elf_addr_t)(long)envp, sp++);
232 envp = argv + argc + 1;
235 /* Populate argv and envp */
236 p = current->mm->arg_start;
239 __put_user((elf_addr_t)p, argv++);
240 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
241 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
246 current->mm->arg_end = current->mm->env_start = p;
249 __put_user((elf_addr_t)p, envp++);
250 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
251 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
256 current->mm->env_end = p;
258 /* Put the elf_info on the stack in the right place. */
259 sp = (elf_addr_t __user *)envp + 1;
260 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
265 static unsigned long elf_map(struct file *filep, unsigned long addr,
266 struct elf_phdr *eppnt, int prot, int type,
267 unsigned long total_size)
269 unsigned long map_addr;
270 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
271 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
273 addr = ELF_PAGESTART(addr);
274 size = ELF_PAGEALIGN(size);
276 down_write(&current->mm->mmap_sem);
279 * total_size is the size of the ELF (interpreter) image.
280 * The _first_ mmap needs to know the full size, otherwise
281 * randomization might put this image into an overlapping
282 * position with the ELF binary image. (since size < total_size)
283 * So we first map the 'big' image - and unmap the remainder at
284 * the end. (this unmapping is needed for ELF images with holes.)
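 * For example, if this segment's own mapping is 2 pages but total_size
 * covers 5 pages, we mmap all 5 pages in one go and then munmap the
 * trailing 3 pages, which later PT_LOAD mappings can fill in.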
287 total_size = ELF_PAGEALIGN(total_size);
288 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
289 if (!BAD_ADDR(map_addr))
290 do_munmap(current->mm, map_addr+size, total_size-size);
292 map_addr = do_mmap(filep, addr, size, prot, type, off);
294 up_write(&current->mm->mmap_sem);
299 #endif /* !elf_map */
301 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
303 int i, first_idx = -1, last_idx = -1;
305 for (i = 0; i < nr; i++)
306 if (cmds[i].p_type == PT_LOAD) {
315 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
316 ELF_PAGESTART(cmds[first_idx].p_vaddr);
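/* The value computed above is the distance from the page-aligned start
   of the first PT_LOAD segment to the end of the last one, e.g. segments
   at p_vaddr 0 (p_memsz 0x1000) and p_vaddr 0x200000 (p_memsz 0x500)
   give a span of 0x200500 bytes. */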
319 /* This is much more generalized than the library routine read function,
320 so we keep this separate. Technically the library read function
321 is only provided so that we can read a.out libraries that have
324 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
325 struct file * interpreter,
326 unsigned long *interp_load_addr,
327 unsigned long no_base)
329 struct elf_phdr *elf_phdata;
330 struct elf_phdr *eppnt;
331 unsigned long load_addr = 0;
332 int load_addr_set = 0;
333 unsigned long last_bss = 0, elf_bss = 0;
334 unsigned long error = ~0UL;
335 unsigned long total_size;
338 /* First of all, some simple consistency checks */
339 if (interp_elf_ex->e_type != ET_EXEC &&
340 interp_elf_ex->e_type != ET_DYN)
342 if (!elf_check_arch(interp_elf_ex))
344 if (!interpreter->f_op || !interpreter->f_op->mmap)
348 * If the size of this structure has changed, then punt, since
349 * we will be doing the wrong thing.
351 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
353 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
356 /* Now read in all of the header information */
358 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
359 if (size > ELF_MIN_ALIGN)
361 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
365 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
370 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
375 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
376 if (eppnt->p_type == PT_LOAD) {
377 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
379 unsigned long vaddr = 0;
380 unsigned long k, map_addr;
382 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
383 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
384 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
385 vaddr = eppnt->p_vaddr;
386 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
387 elf_type |= MAP_FIXED;
388 else if (no_base && interp_elf_ex->e_type == ET_DYN)
391 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
394 if (BAD_ADDR(map_addr))
397 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
398 load_addr = map_addr - ELF_PAGESTART(vaddr);
403 * Check to see if the section's size will overflow the
404 * allowed task size. Note that p_filesz must always be
405 * <= p_memsz so it is only necessary to check p_memsz.
407 k = load_addr + eppnt->p_vaddr;
408 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
409 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
415 * Find the end of the file mapping for this phdr, and keep
416 * track of the largest address we see for this.
418 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
423 * Do the same thing for the memory mapping - between
424 * elf_bss and last_bss is the bss section.
426 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
433 * Now fill out the bss section. First pad the last page up
434 * to the page boundary, and then perform a mmap to make sure
435 * that there are zero-mapped pages up to and including the
439 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
441 /* Map the last of the bss segment */
442 if (last_bss > elf_bss) {
443 error = do_brk(elf_bss, last_bss - elf_bss);
448 *interp_load_addr = load_addr;
449 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
457 static unsigned long load_aout_interp(struct exec * interp_ex,
458 struct file * interpreter)
460 unsigned long text_data, elf_entry = ~0UL;
464 current->mm->end_code = interp_ex->a_text;
465 text_data = interp_ex->a_text + interp_ex->a_data;
466 current->mm->end_data = text_data;
467 current->mm->brk = interp_ex->a_bss + text_data;
469 switch (N_MAGIC(*interp_ex)) {
472 addr = (char __user *)0;
476 offset = N_TXTOFF(*interp_ex);
477 addr = (char __user *) N_TXTADDR(*interp_ex);
483 do_brk(0, text_data);
484 if (!interpreter->f_op || !interpreter->f_op->read)
486 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
488 flush_icache_range((unsigned long)addr,
489 (unsigned long)addr + text_data);
491 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
493 elf_entry = interp_ex->a_entry;
500 * These are the functions used to load ELF style executables and shared
501 * libraries. There is no binary dependent code anywhere else.
504 #define INTERPRETER_NONE 0
505 #define INTERPRETER_AOUT 1
506 #define INTERPRETER_ELF 2
509 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
511 struct file *interpreter = NULL; /* to shut gcc up */
512 unsigned long load_addr = 0, load_bias = 0;
513 int load_addr_set = 0;
514 char * elf_interpreter = NULL;
515 unsigned int interpreter_type = INTERPRETER_NONE;
516 unsigned char ibcs2_interpreter = 0;
518 struct elf_phdr * elf_ppnt, *elf_phdata;
519 unsigned long elf_bss, elf_brk;
523 unsigned long elf_entry, interp_load_addr = 0;
524 unsigned long start_code, end_code, start_data, end_data;
525 unsigned long reloc_func_desc = 0;
526 struct elfhdr elf_ex;
527 struct elfhdr interp_elf_ex;
528 struct exec interp_ex;
529 char passed_fileno[6];
530 struct files_struct *files;
531 int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
532 unsigned long def_flags = 0;
534 /* Get the exec-header */
535 elf_ex = *((struct elfhdr *) bprm->buf);
538 /* First of all, some simple consistency checks */
539 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
542 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
544 if (!elf_check_arch(&elf_ex))
546 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
549 /* Now read in all of the header information */
552 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
554 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
556 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
557 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
561 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
565 files = current->files; /* Refcounted so ok */
566 retval = unshare_files();
569 if (files == current->files) {
570 put_files_struct(files);
574 /* exec will make our files private anyway, but for the a.out
575 loader stuff we need to do it earlier */
577 retval = get_unused_fd();
580 get_file(bprm->file);
581 fd_install(elf_exec_fileno = retval, bprm->file);
583 elf_ppnt = elf_phdata;
592 for (i = 0; i < elf_ex.e_phnum; i++) {
593 if (elf_ppnt->p_type == PT_INTERP) {
594 /* This is the program interpreter used for
595 * shared libraries - for now assume that this
596 * is an a.out format binary
600 if (elf_ppnt->p_filesz > PATH_MAX)
602 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
604 if (!elf_interpreter)
607 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
611 goto out_free_interp;
612 /* If the program interpreter is one of these two,
613 * then assume an iBCS2 image. Otherwise assume
614 * a native linux image.
616 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
617 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
618 ibcs2_interpreter = 1;
621 * The early SET_PERSONALITY here is so that the lookup
622 * for the interpreter happens in the namespace of the
623 * to-be-execed image. SET_PERSONALITY can select an
626 * However, SET_PERSONALITY is NOT allowed to switch
627 * this task into the new image's memory mapping
628 * policy - that is, TASK_SIZE must still evaluate to
629 * that which is appropriate to the execing application.
630 * This is because exit_mmap() needs to have TASK_SIZE
631 * evaluate to the size of the old image.
633 * So if (say) a 64-bit application is execing a 32-bit
634 * application it is the architecture's responsibility
635 * to defer changing the value of TASK_SIZE until the
636 * switch really is going to happen - do this in
637 * flush_thread(). - akpm
639 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
641 interpreter = open_exec(elf_interpreter);
642 retval = PTR_ERR(interpreter);
643 if (IS_ERR(interpreter))
644 goto out_free_interp;
645 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
647 goto out_free_dentry;
649 /* Get the exec headers */
650 interp_ex = *((struct exec *) bprm->buf);
651 interp_elf_ex = *((struct elfhdr *) bprm->buf);
657 elf_ppnt = elf_phdata;
658 executable_stack = EXSTACK_DEFAULT;
660 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
661 if (elf_ppnt->p_type == PT_GNU_STACK) {
662 if (elf_ppnt->p_flags & PF_X)
663 executable_stack = EXSTACK_ENABLE_X;
665 executable_stack = EXSTACK_DISABLE_X;
668 if (i == elf_ex.e_phnum)
669 def_flags |= VM_EXEC | VM_MAYEXEC;
673 if (current->personality == PER_LINUX)
674 switch (exec_shield) {
676 if (executable_stack != EXSTACK_DEFAULT) {
677 current->flags |= PF_RELOCEXEC;
678 relocexec = PF_RELOCEXEC;
683 executable_stack = EXSTACK_DISABLE_X;
684 current->flags |= PF_RELOCEXEC;
685 relocexec = PF_RELOCEXEC;
689 /* Some simple consistency checks for the interpreter */
690 if (elf_interpreter) {
691 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
693 /* Now figure out which format our binary is */
694 if ((N_MAGIC(interp_ex) != OMAGIC) &&
695 (N_MAGIC(interp_ex) != ZMAGIC) &&
696 (N_MAGIC(interp_ex) != QMAGIC))
697 interpreter_type = INTERPRETER_ELF;
699 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
700 interpreter_type &= ~INTERPRETER_ELF;
703 if (!interpreter_type)
704 goto out_free_dentry;
706 /* Make sure only one type was selected */
707 if ((interpreter_type & INTERPRETER_ELF) &&
708 interpreter_type != INTERPRETER_ELF) {
709 // FIXME - ratelimit this before re-enabling
710 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
711 interpreter_type = INTERPRETER_ELF;
713 /* Verify the interpreter has a valid arch */
714 if ((interpreter_type == INTERPRETER_ELF) &&
715 !elf_check_arch(&interp_elf_ex))
716 goto out_free_dentry;
718 /* Executables without an interpreter also need a personality */
719 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
722 /* OK, we are done with that, now set up the arg stuff,
723 and then start this sucker up */
725 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
726 char *passed_p = passed_fileno;
727 sprintf(passed_fileno, "%d", elf_exec_fileno);
729 if (elf_interpreter) {
730 retval = copy_strings_kernel(1, &passed_p, bprm);
732 goto out_free_dentry;
737 /* Flush all traces of the currently running executable */
738 retval = flush_old_exec(bprm);
740 goto out_free_dentry;
741 current->flags |= relocexec;
745 * Turn off the CS limit completely if exec-shield disabled or
749 arch_add_exec_range(current->mm, -1);
752 /* Discard our unneeded old files struct */
755 put_files_struct(files);
759 /* OK, This is the point of no return */
760 current->mm->start_data = 0;
761 current->mm->end_data = 0;
762 current->mm->end_code = 0;
763 current->mm->mmap = NULL;
764 #ifdef __HAVE_ARCH_MMAP_TOP
765 current->mm->mmap_top = mmap_top();
767 current->flags &= ~PF_FORKNOEXEC;
768 current->mm->def_flags = def_flags;
770 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
771 may depend on the personality. */
772 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
774 /* Do this so that we can load the interpreter, if need be. We will
775 change some of these later */
776 current->mm->rss = 0;
777 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
778 current->mm->non_executable_cache = current->mm->mmap_top;
779 retval = setup_arg_pages(bprm, executable_stack);
781 send_sig(SIGKILL, current, 0);
782 goto out_free_dentry;
785 current->mm->start_stack = bprm->p;
788 /* Now we do a little grungy work by mmaping the ELF image into
789 the correct location in memory.
792 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
793 int elf_prot = 0, elf_flags;
794 unsigned long k, vaddr;
796 if (elf_ppnt->p_type != PT_LOAD)
799 if (unlikely (elf_brk > elf_bss)) {
802 /* There was a PT_LOAD segment with p_memsz > p_filesz
803 before this one. Map anonymous pages, if needed,
804 and clear the area. */
805 retval = set_brk (elf_bss + load_bias,
806 elf_brk + load_bias);
808 send_sig(SIGKILL, current, 0);
809 goto out_free_dentry;
811 nbyte = ELF_PAGEOFFSET(elf_bss);
813 nbyte = ELF_MIN_ALIGN - nbyte;
814 if (nbyte > elf_brk - elf_bss)
815 nbyte = elf_brk - elf_bss;
816 clear_user((void __user *) elf_bss + load_bias, nbyte);
820 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
821 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
822 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
824 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
826 vaddr = elf_ppnt->p_vaddr;
827 if (elf_ex.e_type == ET_EXEC || load_addr_set)
828 elf_flags |= MAP_FIXED;
829 else if (elf_ex.e_type == ET_DYN)
833 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
836 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
840 if (!load_addr_set) {
842 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
843 if (elf_ex.e_type == ET_DYN) {
845 ELF_PAGESTART(load_bias + vaddr);
846 load_addr += load_bias;
847 reloc_func_desc = load_bias;
850 k = elf_ppnt->p_vaddr;
851 if (k < start_code) start_code = k;
852 if (start_data < k) start_data = k;
855 * Check to see if the section's size will overflow the
856 * allowed task size. Note that p_filesz must always be
857 * <= p_memsz so it is only necessary to check p_memsz.
859 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
860 elf_ppnt->p_memsz > TASK_SIZE ||
861 TASK_SIZE - elf_ppnt->p_memsz < k) {
862 /* set_brk can never work. Avoid overflows. */
863 send_sig(SIGKILL, current, 0);
864 goto out_free_dentry;
867 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
871 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
875 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
880 elf_ex.e_entry += load_bias;
881 elf_bss += load_bias;
882 elf_brk += load_bias;
883 start_code += load_bias;
884 end_code += load_bias;
885 start_data += load_bias;
886 end_data += load_bias;
888 /* Calling set_brk effectively mmaps the pages that we need
889 * for the bss and break sections. We must do this before
890 * mapping in the interpreter, to make sure it doesn't wind
891 * up getting placed where the bss needs to go.
893 retval = set_brk(elf_bss, elf_brk);
895 send_sig(SIGKILL, current, 0);
896 goto out_free_dentry;
900 if (elf_interpreter) {
901 if (interpreter_type == INTERPRETER_AOUT)
902 elf_entry = load_aout_interp(&interp_ex,
905 elf_entry = load_elf_interp(&interp_elf_ex,
909 if (BAD_ADDR(elf_entry)) {
910 printk(KERN_ERR "Unable to load interpreter\n");
911 send_sig(SIGSEGV, current, 0);
912 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
913 goto out_free_dentry;
915 reloc_func_desc = interp_load_addr;
917 allow_write_access(interpreter);
919 kfree(elf_interpreter);
921 elf_entry = elf_ex.e_entry;
926 if (interpreter_type != INTERPRETER_AOUT)
927 sys_close(elf_exec_fileno);
929 set_binfmt(&elf_format);
932 * Map the vsyscall trampoline. This address is then passed via
935 #ifdef __HAVE_ARCH_VSYSCALL
940 current->flags &= ~PF_FORKNOEXEC;
941 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
942 load_addr, interp_load_addr);
943 /* N.B. passed_fileno might not be initialized? */
944 if (interpreter_type == INTERPRETER_AOUT)
945 current->mm->arg_start += strlen(passed_fileno) + 1;
946 current->mm->end_code = end_code;
947 current->mm->start_code = start_code;
948 current->mm->start_data = start_data;
949 current->mm->end_data = end_data;
950 current->mm->start_stack = bprm->p;
952 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
953 if (current->flags & PF_RELOCEXEC)
954 randomize_brk(elf_brk);
956 if (current->personality & MMAP_PAGE_ZERO) {
957 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
958 and some applications "depend" upon this behavior.
959 Since we do not have the power to recompile these, we
960 emulate the SVr4 behavior. Sigh. */
961 down_write(&current->mm->mmap_sem);
962 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
963 MAP_FIXED | MAP_PRIVATE, 0);
964 up_write(&current->mm->mmap_sem);
969 * The ABI may specify that certain registers be set up in special
970 * ways (on i386 %edx is the address of a DT_FINI function, for
971 * example). In addition, it may also specify (eg, PowerPC64 ELF)
972 * that the e_entry field is the address of the function descriptor
973 * for the startup routine, rather than the address of the startup
974 * routine itself. This macro performs whatever initialization to
975 * the regs structure is required as well as any relocations to the
976 * function descriptor entries when executing dynamically linked apps.
978 ELF_PLAT_INIT(regs, reloc_func_desc);
981 start_thread(regs, elf_entry, bprm->p);
982 if (unlikely(current->ptrace & PT_PTRACED)) {
983 if (current->ptrace & PT_TRACE_EXEC)
984 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
986 send_sig(SIGTRAP, current, 0);
994 allow_write_access(interpreter);
999 kfree(elf_interpreter);
1001 sys_close(elf_exec_fileno);
1004 put_files_struct(current->files);
1005 current->files = files;
1009 current->flags &= ~PF_RELOCEXEC;
1010 current->flags |= old_relocexec;
1014 /* This is really simpleminded and specialized - we are loading an
1015 a.out library that is given an ELF header. */
1017 static int load_elf_library(struct file *file)
1019 struct elf_phdr *elf_phdata;
1020 unsigned long elf_bss, bss, len;
1021 int retval, error, i, j;
1022 struct elfhdr elf_ex;
1025 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1026 if (retval != sizeof(elf_ex))
1029 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1032 /* First of all, some simple consistency checks */
1033 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1034 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1037 /* Now read in all of the header information */
1039 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1040 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1043 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1048 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1052 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1053 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1057 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1059 /* Now use mmap to map the library into memory. */
1060 down_write(&current->mm->mmap_sem);
1061 error = do_mmap(file,
1062 ELF_PAGESTART(elf_phdata->p_vaddr),
1063 (elf_phdata->p_filesz +
1064 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1065 PROT_READ | PROT_WRITE | PROT_EXEC,
1066 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1067 (elf_phdata->p_offset -
1068 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1069 up_write(&current->mm->mmap_sem);
1070 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1073 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1076 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1077 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1079 do_brk(len, bss - len);
1089 * Note that some platforms still use traditional core dumps and not
1090 * the ELF core dump. Each platform can select it as appropriate.
1092 #ifdef USE_ELF_CORE_DUMP
1097 * Modelled on fs/exec.c:aout_core_dump()
1098 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1101 * These are the only things you should do on a core-file: use only these
1102 * functions to write out all the necessary info.
1104 static int dump_write(struct file *file, const void *addr, int nr)
1106 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1109 static int dump_seek(struct file *file, off_t off)
1111 if (file->f_op->llseek) {
1112 if (file->f_op->llseek(file, off, 0) != off)
1120 * Decide whether a segment is worth dumping; default is yes to be
1121 * sure (missing info is worse than too much; etc).
1122 * Personally I'd include everything, and use the coredump limit...
1124 * I think we should skip something. But I am not sure how. H.J.
1126 static int maydump(struct vm_area_struct *vma)
1129 * If we may not read the contents, don't allow us to dump
1130 * them either. "dump_write()" can't handle it anyway.
1132 if (!(vma->vm_flags & VM_READ))
1135 /* Do not dump I/O mapped devices! -DaveM */
1136 if (vma->vm_flags & VM_IO)
1139 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1141 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1147 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
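/* e.g. roundup(13, 4) == 16; used below to keep note names and
   descriptor data 4-byte aligned within the note segment. */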
1149 /* An ELF note in memory */
1154 unsigned int datasz;
1158 static int notesize(struct memelfnote *en)
1162 sz = sizeof(struct elf_note);
1163 sz += roundup(strlen(en->name) + 1, 4);
1164 sz += roundup(en->datasz, 4);
1169 #define DUMP_WRITE(addr, nr) \
1170 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1171 #define DUMP_SEEK(off) \
1172 do { if (!dump_seek(file, (off))) return 0; } while(0)
1174 static int writenote(struct memelfnote *men, struct file *file)
1178 en.n_namesz = strlen(men->name) + 1;
1179 en.n_descsz = men->datasz;
1180 en.n_type = men->type;
1182 DUMP_WRITE(&en, sizeof(en));
1183 DUMP_WRITE(men->name, en.n_namesz);
1184 /* XXX - cast from long long to long to avoid need for libgcc.a */
1185 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1186 DUMP_WRITE(men->data, men->datasz);
1187 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
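/* The record written above is: the fixed elf_note header, the
   NUL-terminated name padded to a 4-byte boundary, then the descriptor
   data, also padded to a 4-byte boundary. */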
1194 #define DUMP_WRITE(addr, nr) \
1195 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1197 #define DUMP_SEEK(off) \
1198 if (!dump_seek(file, (off))) \
1201 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1203 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1204 elf->e_ident[EI_CLASS] = ELF_CLASS;
1205 elf->e_ident[EI_DATA] = ELF_DATA;
1206 elf->e_ident[EI_VERSION] = EV_CURRENT;
1207 elf->e_ident[EI_OSABI] = ELF_OSABI;
1208 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1210 elf->e_type = ET_CORE;
1211 elf->e_machine = ELF_ARCH;
1212 elf->e_version = EV_CURRENT;
1214 elf->e_phoff = sizeof(struct elfhdr);
1217 elf->e_ehsize = sizeof(struct elfhdr);
1218 elf->e_phentsize = sizeof(struct elf_phdr);
1219 elf->e_phnum = segs;
1220 elf->e_shentsize = 0;
1222 elf->e_shstrndx = 0;
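/* Core files carry program headers only; all section-header fields are
   left zero. */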
1226 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1228 phdr->p_type = PT_NOTE;
1229 phdr->p_offset = offset;
1232 phdr->p_filesz = sz;
1239 static void fill_note(struct memelfnote *note, const char *name, int type,
1240 unsigned int sz, void *data)
1250 * fill up all the fields in prstatus from the given task struct, except registers
1251 * which need to be filled up separately.
1253 static void fill_prstatus(struct elf_prstatus *prstatus,
1254 struct task_struct *p, long signr)
1256 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1257 prstatus->pr_sigpend = p->pending.signal.sig[0];
1258 prstatus->pr_sighold = p->blocked.sig[0];
1259 prstatus->pr_pid = p->pid;
1260 prstatus->pr_ppid = p->parent->pid;
1261 prstatus->pr_pgrp = process_group(p);
1262 prstatus->pr_sid = p->signal->session;
1263 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1264 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1265 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1266 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
1269 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1270 struct mm_struct *mm)
1274 /* first copy the parameters from user space */
1275 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1277 len = mm->arg_end - mm->arg_start;
1278 if (len >= ELF_PRARGSZ)
1279 len = ELF_PRARGSZ-1;
1280 copy_from_user(&psinfo->pr_psargs,
1281 (const char __user *)mm->arg_start, len);
1282 for(i = 0; i < len; i++)
1283 if (psinfo->pr_psargs[i] == 0)
1284 psinfo->pr_psargs[i] = ' ';
1285 psinfo->pr_psargs[len] = 0;
1287 psinfo->pr_pid = p->pid;
1288 psinfo->pr_ppid = p->parent->pid;
1289 psinfo->pr_pgrp = process_group(p);
1290 psinfo->pr_sid = p->signal->session;
1292 i = p->state ? ffz(~p->state) + 1 : 0;
1293 psinfo->pr_state = i;
1294 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1295 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1296 psinfo->pr_nice = task_nice(p);
1297 psinfo->pr_flag = p->flags;
1298 SET_UID(psinfo->pr_uid, p->uid);
1299 SET_GID(psinfo->pr_gid, p->gid);
1300 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1305 /* Here is the structure in which status of each thread is captured. */
1306 struct elf_thread_status
1308 struct list_head list;
1309 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1310 elf_fpregset_t fpu; /* NT_PRFPREG */
1311 #ifdef ELF_CORE_COPY_XFPREGS
1312 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1314 struct memelfnote notes[3];
1319 * In order to add the specific thread information for the elf file format,
1320 * we need to keep a linked list of every threads pr_status and then
1321 * create a single section for them in the final core file.
1323 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1326 struct elf_thread_status *t;
1329 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1332 memset(t, 0, sizeof(*t));
1334 INIT_LIST_HEAD(&t->list);
1337 fill_prstatus(&t->prstatus, p, signr);
1338 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1340 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1342 sz += notesize(&t->notes[0]);
1344 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1345 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1347 sz += notesize(&t->notes[1]);
1350 #ifdef ELF_CORE_COPY_XFPREGS
1351 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1352 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1354 sz += notesize(&t->notes[2]);
1357 list_add(&t->list, thread_list);
1364 * This is a two-pass process; first we find the offsets of the bits,
1365 * and then they are actually written out. If we run out of core limit
1368 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1376 struct vm_area_struct *vma;
1377 struct elfhdr *elf = NULL;
1378 off_t offset = 0, dataoff;
1379 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1381 struct memelfnote *notes = NULL;
1382 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1383 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1384 struct task_struct *g, *p;
1385 LIST_HEAD(thread_list);
1386 struct list_head *t;
1387 elf_fpregset_t *fpu = NULL;
1388 #ifdef ELF_CORE_COPY_XFPREGS
1389 elf_fpxregset_t *xfpu = NULL;
1391 int thread_status_size = 0;
1395 * We no longer stop all VM operations.
1397 * This is because those processes that could possibly change map_count or
1398 * the mmap / vma pages are now blocked in do_exit on current finishing
1401 * Only ptrace can touch these memory addresses, but it doesn't change
1402 * the map_count or the pages allocated. So no possibility of crashing
1403 * exists while dumping the mm->vm_next areas to the core file.
1406 /* alloc memory for large data structures: too large to be on stack */
1407 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1410 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1413 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1416 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1419 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1422 #ifdef ELF_CORE_COPY_XFPREGS
1423 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1428 /* capture the status of all other threads */
1430 read_lock(&tasklist_lock);
1432 if (current->mm == p->mm && current != p) {
1433 int sz = elf_dump_thread_status(signr, p, &thread_list);
1435 read_unlock(&tasklist_lock);
1438 thread_status_size += sz;
1440 while_each_thread(g,p);
1441 read_unlock(&tasklist_lock);
1444 /* now collect the dump for the current */
1445 memset(prstatus, 0, sizeof(*prstatus));
1446 fill_prstatus(prstatus, current, signr);
1447 elf_core_copy_regs(&prstatus->pr_reg, regs);
1449 segs = current->mm->map_count;
1450 #ifdef ELF_CORE_EXTRA_PHDRS
1451 segs += ELF_CORE_EXTRA_PHDRS;
1455 fill_elf_header(elf, segs+1); /* including notes section */
1458 current->flags |= PF_DUMPCORE;
1461 * Set up the notes in similar form to SVR4 core dumps made
1462 * with info from their /proc.
1465 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1467 fill_psinfo(psinfo, current->group_leader, current->mm);
1468 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1470 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1474 auxv = (elf_addr_t *) current->mm->saved_auxv;
1479 while (auxv[i - 2] != AT_NULL);
1480 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1481 i * sizeof (elf_addr_t), auxv);
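/* i now counts the elf_addr_t words in saved_auxv up to and including
   the terminating AT_NULL pair, so the NT_AUXV note is an exact copy of
   the aux vector this process was started with. */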
1483 /* Try to dump the FPU. */
1484 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1485 fill_note(notes + numnote++,
1486 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1487 #ifdef ELF_CORE_COPY_XFPREGS
1488 if (elf_core_copy_task_xfpregs(current, xfpu))
1489 fill_note(notes + numnote++,
1490 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1496 DUMP_WRITE(elf, sizeof(*elf));
1497 offset += sizeof(*elf); /* Elf header */
1498 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1500 /* Write notes phdr entry */
1502 struct elf_phdr phdr;
1505 for (i = 0; i < numnote; i++)
1506 sz += notesize(notes + i);
1508 sz += thread_status_size;
1510 fill_elf_note_phdr(&phdr, sz, offset);
1512 DUMP_WRITE(&phdr, sizeof(phdr));
1515 /* Page-align dumped data */
1516 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
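/* Layout so far: ELF header, program headers, then the note segment;
   the memory segments are dumped page-aligned starting at dataoff. */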
1518 /* Write program headers for segments dump */
1519 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1520 struct elf_phdr phdr;
1523 sz = vma->vm_end - vma->vm_start;
1525 phdr.p_type = PT_LOAD;
1526 phdr.p_offset = offset;
1527 phdr.p_vaddr = vma->vm_start;
1529 phdr.p_filesz = maydump(vma) ? sz : 0;
1531 offset += phdr.p_filesz;
1532 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1533 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1534 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1535 phdr.p_align = ELF_EXEC_PAGESIZE;
1537 DUMP_WRITE(&phdr, sizeof(phdr));
1540 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1541 ELF_CORE_WRITE_EXTRA_PHDRS;
1544 /* write out the notes section */
1545 for (i = 0; i < numnote; i++)
1546 if (!writenote(notes + i, file))
1549 /* write out the thread status notes section */
1550 list_for_each(t, &thread_list) {
1551 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1552 for (i = 0; i < tmp->num_notes; i++)
1553 if (!writenote(&tmp->notes[i], file))
1559 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1565 for (addr = vma->vm_start;
1567 addr += PAGE_SIZE) {
1569 struct vm_area_struct *vma;
1571 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1572 &page, &vma) <= 0) {
1573 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1575 if (page == ZERO_PAGE(addr)) {
1576 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1579 flush_cache_page(vma, addr);
1581 if ((size += PAGE_SIZE) > limit ||
1582 !dump_write(file, kaddr,
1585 page_cache_release(page);
1590 page_cache_release(page);
1595 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1596 ELF_CORE_WRITE_EXTRA_DATA;
1599 if ((off_t) file->f_pos != offset) {
1601 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1602 (off_t) file->f_pos, offset);
1609 while(!list_empty(&thread_list)) {
1610 struct list_head *tmp = thread_list.next;
1612 kfree(list_entry(tmp, struct elf_thread_status, list));
1620 #ifdef ELF_CORE_COPY_XFPREGS
1627 #endif /* USE_ELF_CORE_DUMP */
1629 static int __init init_elf_binfmt(void)
1631 return register_binfmt(&elf_format);
1634 static void __exit exit_elf_binfmt(void)
1636 /* Remove the COFF and ELF loaders. */
1637 unregister_binfmt(&elf_format);
1640 core_initcall(init_elf_binfmt);
1641 module_exit(exit_elf_binfmt);
1642 MODULE_LICENSE("GPL");