2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
41 #include <asm/uaccess.h>
42 #include <asm/param.h>
43 #include <asm/pgalloc.h>
45 #include <linux/elf.h>
47 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
48 static int load_elf_library(struct file*);
49 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
50 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
53 #define elf_addr_t unsigned long
57 * If we don't support core dumping, then supply a NULL so we
60 #ifdef USE_ELF_CORE_DUMP
61 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
63 #define elf_core_dump NULL
66 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
67 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
69 # define ELF_MIN_ALIGN PAGE_SIZE
72 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
73 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
74 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
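/*
 * Worked example (illustrative, hypothetical p_vaddr): with
 * ELF_MIN_ALIGN == 0x1000 and _v == 0x08048154,
 *
 *   ELF_PAGESTART(0x08048154)  == 0x08048000   (round down to a page)
 *   ELF_PAGEOFFSET(0x08048154) == 0x00000154   (offset within the page)
 *   ELF_PAGEALIGN(0x08048154)  == 0x08049000   (round up to a page)
 *
 * elf_map() below relies on the first two to turn a segment's
 * p_vaddr/p_offset pair into page-aligned mmap arguments.
 */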
76 static struct linux_binfmt elf_format = {
77 .module = THIS_MODULE,
78 .load_binary = load_elf_binary,
79 .load_shlib = load_elf_library,
80 .core_dump = elf_core_dump,
81 .min_coredump = ELF_EXEC_PAGESIZE
84 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
86 static int set_brk(unsigned long start, unsigned long end)
88 start = ELF_PAGEALIGN(start);
89 end = ELF_PAGEALIGN(end);
91 unsigned long addr = do_brk(start, end - start);
95 current->mm->start_brk = current->mm->brk = end;
100 /* We need to explicitly zero any fractional pages
101 after the data section (i.e. bss). This would
102 contain the junk from the file that should not
106 static void padzero(unsigned long elf_bss)
110 nbyte = ELF_PAGEOFFSET(elf_bss);
112 nbyte = ELF_MIN_ALIGN - nbyte;
113 clear_user((void __user *) elf_bss, nbyte);
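/*
 * Illustrative example (hypothetical elf_bss, assuming ELF_MIN_ALIGN == 0x1000):
 * for elf_bss == 0x0804a123, ELF_PAGEOFFSET() yields 0x123, so padzero()
 * clears nbyte == 0x1000 - 0x123 == 0xedd bytes, i.e. user addresses from
 * 0x0804a123 up to (but not including) the page boundary at 0x0804b000.
 */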
117 /* Let's use some macros to make this stack manipulation a little clearer */
118 #ifdef CONFIG_STACK_GROWSUP
119 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
120 #define STACK_ROUND(sp, items) \
121 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
122 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
124 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
125 #define STACK_ROUND(sp, items) \
126 (((unsigned long) (sp - items)) &~ 15UL)
127 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
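/*
 * Illustrative example for the common grows-down case (hypothetical 32-bit
 * values, sizeof(elf_addr_t) == 4):
 *
 *   p = 0xbffffe40; STACK_ALLOC(p, 13)  ->  p == 0xbffffe33  (13 bytes claimed)
 *   sp = STACK_ADD(p, 7)                ->  0xbffffe33 - 7*4 == 0xbffffe17
 *   STACK_ROUND(sp, 9)                  ->  (0xbffffe17 - 9*4) & ~15 == 0xbffffdf0
 *
 * The rounding keeps the final stack pointer 16-byte aligned, which most
 * ABIs expect at process entry.
 */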
131 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
132 int interp_aout, unsigned long load_addr,
133 unsigned long interp_load_addr)
135 unsigned long p = bprm->p;
136 int argc = bprm->argc;
137 int envc = bprm->envc;
138 elf_addr_t __user *argv;
139 elf_addr_t __user *envp;
140 elf_addr_t __user *sp;
141 elf_addr_t __user *u_platform;
142 const char *k_platform = ELF_PLATFORM;
144 elf_addr_t *elf_info;
146 struct task_struct *tsk = current;
149 * If this architecture has a platform capability string, copy it
150 * to userspace. In some cases (Sparc), this info is impossible
151 * for userspace to get any other way, in others (i386) it is
157 size_t len = strlen(k_platform) + 1;
159 #ifdef __HAVE_ARCH_ALIGN_STACK
160 p = (unsigned long)arch_align_stack((unsigned long)p);
162 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
163 __copy_to_user(u_platform, k_platform, len);
166 /* Create the ELF interpreter info */
167 elf_info = (elf_addr_t *) current->mm->saved_auxv;
168 #define NEW_AUX_ENT(id, val) \
169 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
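/*
 * Each NEW_AUX_ENT() invocation appends one (id, value) pair to
 * current->mm->saved_auxv, so e.g. NEW_AUX_ENT(AT_PAGESZ, 4096)
 * (illustrative value) stores AT_PAGESZ followed by 4096; the array is
 * later terminated by an all-zero AT_NULL pair.
 */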
173 * ARCH_DLINFO must come first so PPC can do its special alignment of
178 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
179 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
180 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
181 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
182 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
183 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
184 NEW_AUX_ENT(AT_BASE, interp_load_addr);
185 NEW_AUX_ENT(AT_FLAGS, 0);
186 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
187 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
188 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
189 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
190 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
191 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
193 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
196 /* AT_NULL is zero; clear the rest too */
197 memset(&elf_info[ei_index], 0,
198 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
200 /* And advance past the AT_NULL entry. */
203 sp = STACK_ADD(p, ei_index);
205 items = (argc + 1) + (envc + 1);
207 items += 3; /* a.out interpreters require argv & envp too */
209 items += 1; /* ELF interpreters only put argc on the stack */
211 bprm->p = STACK_ROUND(sp, items);
213 /* Point sp at the lowest address on the stack */
214 #ifdef CONFIG_STACK_GROWSUP
215 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
216 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
218 sp = (elf_addr_t __user *)bprm->p;
221 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
222 __put_user(argc, sp++);
225 envp = argv + argc + 1;
226 __put_user((elf_addr_t)(long)argv, sp++);
227 __put_user((elf_addr_t)(long)envp, sp++);
230 envp = argv + argc + 1;
233 /* Populate argv and envp */
234 p = current->mm->arg_start;
237 __put_user((elf_addr_t)p, argv++);
238 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
239 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
244 current->mm->arg_end = current->mm->env_start = p;
247 __put_user((elf_addr_t)p, envp++);
248 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
249 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
254 current->mm->env_end = p;
256 /* Put the elf_info on the stack in the right place. */
257 sp = (elf_addr_t __user *)envp + 1;
258 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
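/*
 * Illustrative layout of the new process stack built above, from the final
 * stack pointer upwards (hypothetical ELF-interpreter case):
 *
 *   sp ->  argc
 *          argv[0] ... argv[argc-1]      (pointers into arg_start..arg_end)
 *          NULL
 *          envp[0] ... envp[envc-1]      (pointers into env_start..env_end)
 *          NULL
 *          auxv id/value pairs           (the elf_info copied just above)
 *          AT_NULL, 0
 *
 * followed higher up by the argument and environment strings themselves
 * and, if present, the platform string.
 */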
263 static unsigned long elf_map(struct file *filep, unsigned long addr,
264 struct elf_phdr *eppnt, int prot, int type,
265 unsigned long total_size)
267 unsigned long map_addr;
268 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
269 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
271 addr = ELF_PAGESTART(addr);
272 size = ELF_PAGEALIGN(size);
274 down_write(&current->mm->mmap_sem);
277 * total_size is the size of the ELF (interpreter) image.
278 * The _first_ mmap needs to know the full size, otherwise
279 * randomization might put this image into an overlapping
280 * position with the ELF binary image. (since size < total_size)
281 * So we first map the 'big' image - and unmap the remainder at
282 * the end (this unmapping is needed for ELF images with holes).
285 total_size = ELF_PAGEALIGN(total_size);
286 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
287 if (!BAD_ADDR(map_addr))
288 do_munmap(current->mm, map_addr+size, total_size-size);
290 map_addr = do_mmap(filep, addr, size, prot, type, off);
292 up_write(&current->mm->mmap_sem);
297 #endif /* !elf_map */
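/*
 * Illustrative numbers for the mapping strategy above (hypothetical sizes):
 * for an interpreter whose segments span total_size == 0x17000 bytes but
 * whose first PT_LOAD only covers size == 0x1000, the code first maps the
 * full 0x17000 so the chosen base reserves room for every segment, then
 * do_munmap() trims the trailing 0x16000 that the later per-segment
 * MAP_FIXED mappings will fill in.
 */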
299 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
301 int i, first_idx = -1, last_idx = -1;
303 for (i = 0; i < nr; i++)
304 if (cmds[i].p_type == PT_LOAD) {
313 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
314 ELF_PAGESTART(cmds[first_idx].p_vaddr);
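/*
 * Worked example (hypothetical program headers): for an interpreter with
 * two PT_LOAD entries, the first at p_vaddr 0x0000 and the last at
 * p_vaddr 0x15000 with p_memsz 0x2000, total_mapping_size() returns
 * 0x15000 + 0x2000 - ELF_PAGESTART(0x0000) == 0x17000, i.e. the span from
 * the first mapped page to the end of the last segment, holes included.
 */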
317 /* This is much more generalized than the library routine read function,
318 so we keep this separate. Technically the library read function
319 is only provided so that we can read a.out libraries that have
322 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
323 struct file * interpreter,
324 unsigned long *interp_load_addr,
325 unsigned long no_base)
327 struct elf_phdr *elf_phdata;
328 struct elf_phdr *eppnt;
329 unsigned long load_addr = 0;
330 int load_addr_set = 0;
331 unsigned long last_bss = 0, elf_bss = 0;
332 unsigned long error = ~0UL;
333 unsigned long total_size;
336 /* First of all, some simple consistency checks */
337 if (interp_elf_ex->e_type != ET_EXEC &&
338 interp_elf_ex->e_type != ET_DYN)
340 if (!elf_check_arch(interp_elf_ex))
342 if (!interpreter->f_op || !interpreter->f_op->mmap)
346 * If the size of this structure has changed, then punt, since
347 * we will be doing the wrong thing.
349 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
351 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
354 /* Now read in all of the header information */
356 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
357 if (size > ELF_MIN_ALIGN)
359 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
363 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
368 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
373 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
374 if (eppnt->p_type == PT_LOAD) {
375 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
377 unsigned long vaddr = 0;
378 unsigned long k, map_addr;
380 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
381 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
382 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
383 vaddr = eppnt->p_vaddr;
384 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
385 elf_type |= MAP_FIXED;
386 else if (no_base && interp_elf_ex->e_type == ET_DYN)
389 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
392 if (BAD_ADDR(map_addr))
395 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
396 load_addr = map_addr - ELF_PAGESTART(vaddr);
401 * Check to see if the section's size will overflow the
402 * allowed task size. Note that p_filesz must always be
403 * <= p_memsz so it is only necessary to check p_memsz.
405 k = load_addr + eppnt->p_vaddr;
406 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
407 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
413 * Find the end of the file mapping for this phdr, and keep
414 * track of the largest address we see for this.
416 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
421 * Do the same thing for the memory mapping - between
422 * elf_bss and last_bss is the bss section.
424 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
431 * Now fill out the bss section. First pad the last page up
432 * to the page boundary, and then perform a mmap to make sure
433 * that there are zero-mapped pages up to and including the
437 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
439 /* Map the last of the bss segment */
440 if (last_bss > elf_bss) {
441 error = do_brk(elf_bss, last_bss - elf_bss);
446 *interp_load_addr = load_addr;
447 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
455 static unsigned long load_aout_interp(struct exec * interp_ex,
456 struct file * interpreter)
458 unsigned long text_data, elf_entry = ~0UL;
462 current->mm->end_code = interp_ex->a_text;
463 text_data = interp_ex->a_text + interp_ex->a_data;
464 current->mm->end_data = text_data;
465 current->mm->brk = interp_ex->a_bss + text_data;
467 switch (N_MAGIC(*interp_ex)) {
470 addr = (char __user *)0;
474 offset = N_TXTOFF(*interp_ex);
475 addr = (char __user *) N_TXTADDR(*interp_ex);
481 do_brk(0, text_data);
482 if (!interpreter->f_op || !interpreter->f_op->read)
484 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
486 flush_icache_range((unsigned long)addr,
487 (unsigned long)addr + text_data);
489 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
491 elf_entry = interp_ex->a_entry;
498 * These are the functions used to load ELF style executables and shared
499 * libraries. There is no binary dependent code anywhere else.
502 #define INTERPRETER_NONE 0
503 #define INTERPRETER_AOUT 1
504 #define INTERPRETER_ELF 2
507 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
509 struct file *interpreter = NULL; /* to shut gcc up */
510 unsigned long load_addr = 0, load_bias = 0;
511 int load_addr_set = 0;
512 char * elf_interpreter = NULL;
513 unsigned int interpreter_type = INTERPRETER_NONE;
514 unsigned char ibcs2_interpreter = 0;
516 struct elf_phdr * elf_ppnt, *elf_phdata;
517 unsigned long elf_bss, elf_brk;
521 unsigned long elf_entry, interp_load_addr = 0;
522 unsigned long start_code, end_code, start_data, end_data;
523 unsigned long reloc_func_desc = 0;
524 struct elfhdr elf_ex;
525 struct elfhdr interp_elf_ex;
526 struct exec interp_ex;
527 char passed_fileno[6];
528 struct files_struct *files;
529 int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
531 /* Get the exec-header */
532 elf_ex = *((struct elfhdr *) bprm->buf);
535 /* First of all, some simple consistency checks */
536 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
539 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
541 if (!elf_check_arch(&elf_ex))
543 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
546 /* Now read in all of the header information */
549 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
551 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
553 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
554 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
558 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
562 files = current->files; /* Refcounted so ok */
563 retval = unshare_files();
566 if (files == current->files) {
567 put_files_struct(files);
571 /* exec will make our files private anyway, but for the a.out
572 loader stuff we need to do it earlier */
574 retval = get_unused_fd();
577 get_file(bprm->file);
578 fd_install(elf_exec_fileno = retval, bprm->file);
580 elf_ppnt = elf_phdata;
589 for (i = 0; i < elf_ex.e_phnum; i++) {
590 if (elf_ppnt->p_type == PT_INTERP) {
591 /* This is the program interpreter used for
592 * shared libraries - for now assume that this
593 * is an a.out format binary
597 if (elf_ppnt->p_filesz > PATH_MAX)
599 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
601 if (!elf_interpreter)
604 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
608 goto out_free_interp;
609 /* If the program interpreter is one of these two,
610 * then assume an iBCS2 image. Otherwise assume
611 * a native linux image.
613 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
614 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
615 ibcs2_interpreter = 1;
618 * The early SET_PERSONALITY here is so that the lookup
619 * for the interpreter happens in the namespace of the
620 * to-be-execed image. SET_PERSONALITY can select an
623 * However, SET_PERSONALITY is NOT allowed to switch
624 * this task into the new image's memory mapping
625 * policy - that is, TASK_SIZE must still evaluate to
626 * that which is appropriate to the execing application.
627 * This is because exit_mmap() needs to have TASK_SIZE
628 * evaluate to the size of the old image.
630 * So if (say) a 64-bit application is execing a 32-bit
631 * application it is the architecture's responsibility
632 * to defer changing the value of TASK_SIZE until the
633 * switch really is going to happen - do this in
634 * flush_thread(). - akpm
636 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
638 interpreter = open_exec(elf_interpreter);
639 retval = PTR_ERR(interpreter);
640 if (IS_ERR(interpreter))
641 goto out_free_interp;
642 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
644 goto out_free_dentry;
646 /* Get the exec headers */
647 interp_ex = *((struct exec *) bprm->buf);
648 interp_elf_ex = *((struct elfhdr *) bprm->buf);
654 elf_ppnt = elf_phdata;
655 executable_stack = EXSTACK_DEFAULT;
657 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
658 if (elf_ppnt->p_type == PT_GNU_STACK) {
659 if (elf_ppnt->p_flags & PF_X)
660 executable_stack = EXSTACK_ENABLE_X;
662 executable_stack = EXSTACK_DISABLE_X;
668 if (current->personality == PER_LINUX)
669 switch (exec_shield) {
671 if (executable_stack != EXSTACK_DEFAULT) {
672 current->flags |= PF_RELOCEXEC;
673 relocexec = PF_RELOCEXEC;
678 executable_stack = EXSTACK_DISABLE_X;
679 current->flags |= PF_RELOCEXEC;
680 relocexec = PF_RELOCEXEC;
684 /* Some simple consistency checks for the interpreter */
685 if (elf_interpreter) {
686 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
688 /* Now figure out which format our binary is */
689 if ((N_MAGIC(interp_ex) != OMAGIC) &&
690 (N_MAGIC(interp_ex) != ZMAGIC) &&
691 (N_MAGIC(interp_ex) != QMAGIC))
692 interpreter_type = INTERPRETER_ELF;
694 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
695 interpreter_type &= ~INTERPRETER_ELF;
698 if (!interpreter_type)
699 goto out_free_dentry;
701 /* Make sure only one type was selected */
702 if ((interpreter_type & INTERPRETER_ELF) &&
703 interpreter_type != INTERPRETER_ELF) {
704 // FIXME - ratelimit this before re-enabling
705 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
706 interpreter_type = INTERPRETER_ELF;
708 /* Verify the interpreter has a valid arch */
709 if ((interpreter_type == INTERPRETER_ELF) &&
710 !elf_check_arch(&interp_elf_ex))
711 goto out_free_dentry;
713 /* Executables without an interpreter also need a personality */
714 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
717 /* OK, we are done with that, now set up the arg stuff,
718 and then start this sucker up */
720 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
721 char *passed_p = passed_fileno;
722 sprintf(passed_fileno, "%d", elf_exec_fileno);
724 if (elf_interpreter) {
725 retval = copy_strings_kernel(1, &passed_p, bprm);
727 goto out_free_dentry;
732 /* Flush all traces of the currently running executable */
733 retval = flush_old_exec(bprm);
735 goto out_free_dentry;
736 current->flags |= relocexec;
740 * In the exec-shield-disabled case turn off the CS limit
744 arch_add_exec_range(current->mm, -1);
747 /* Discard our unneeded old files struct */
750 put_files_struct(files);
754 /* OK, This is the point of no return */
755 current->mm->start_data = 0;
756 current->mm->end_data = 0;
757 current->mm->end_code = 0;
758 current->mm->mmap = NULL;
759 #ifdef __HAVE_ARCH_MMAP_TOP
760 current->mm->mmap_top = mmap_top();
762 current->flags &= ~PF_FORKNOEXEC;
764 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
765 may depend on the personality. */
766 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
768 /* Do this so that we can load the interpreter, if need be. We will
769 change some of these later */
770 current->mm->rss = 0;
771 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
772 current->mm->non_executable_cache = current->mm->mmap_top;
773 retval = setup_arg_pages(bprm, executable_stack);
775 send_sig(SIGKILL, current, 0);
776 goto out_free_dentry;
779 current->mm->start_stack = bprm->p;
782 /* Now we do a little grungy work by mmapping the ELF image into
783 the correct location in memory.
786 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
787 int elf_prot = 0, elf_flags;
788 unsigned long k, vaddr;
790 if (elf_ppnt->p_type != PT_LOAD)
793 if (unlikely (elf_brk > elf_bss)) {
796 /* There was a PT_LOAD segment with p_memsz > p_filesz
797 before this one. Map anonymous pages, if needed,
798 and clear the area. */
799 retval = set_brk (elf_bss + load_bias,
800 elf_brk + load_bias);
802 send_sig(SIGKILL, current, 0);
803 goto out_free_dentry;
805 nbyte = ELF_PAGEOFFSET(elf_bss);
807 nbyte = ELF_MIN_ALIGN - nbyte;
808 if (nbyte > elf_brk - elf_bss)
809 nbyte = elf_brk - elf_bss;
810 clear_user((void __user *) elf_bss + load_bias, nbyte);
814 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
815 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
816 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
818 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
820 vaddr = elf_ppnt->p_vaddr;
821 if (elf_ex.e_type == ET_EXEC || load_addr_set)
822 elf_flags |= MAP_FIXED;
823 else if (elf_ex.e_type == ET_DYN)
827 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
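/*
 * Illustrative values (hypothetical): for an ET_DYN executable with a
 * first PT_LOAD p_vaddr of 0 and an ELF_ET_DYN_BASE of, say, 0x40000000,
 * load_bias becomes 0x40000000, so a segment linked at vaddr 0x154 is
 * mapped at 0x40000154; load_addr and e_entry are rebased by the same
 * load_bias further down.
 */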
830 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
834 if (!load_addr_set) {
836 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
837 if (elf_ex.e_type == ET_DYN) {
839 ELF_PAGESTART(load_bias + vaddr);
840 load_addr += load_bias;
841 reloc_func_desc = load_bias;
844 k = elf_ppnt->p_vaddr;
845 if (k < start_code) start_code = k;
846 if (start_data < k) start_data = k;
849 * Check to see if the section's size will overflow the
850 * allowed task size. Note that p_filesz must always be
851 * <= p_memsz so it is only necessary to check p_memsz.
853 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
854 elf_ppnt->p_memsz > TASK_SIZE ||
855 TASK_SIZE - elf_ppnt->p_memsz < k) {
856 /* set_brk can never work. Avoid overflows. */
857 send_sig(SIGKILL, current, 0);
858 goto out_free_dentry;
861 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
865 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
869 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
874 elf_ex.e_entry += load_bias;
875 elf_bss += load_bias;
876 elf_brk += load_bias;
877 start_code += load_bias;
878 end_code += load_bias;
879 start_data += load_bias;
880 end_data += load_bias;
882 /* Calling set_brk effectively mmaps the pages that we need
883 * for the bss and break sections. We must do this before
884 * mapping in the interpreter, to make sure it doesn't wind
885 * up getting placed where the bss needs to go.
887 retval = set_brk(elf_bss, elf_brk);
889 send_sig(SIGKILL, current, 0);
890 goto out_free_dentry;
894 if (elf_interpreter) {
895 if (interpreter_type == INTERPRETER_AOUT)
896 elf_entry = load_aout_interp(&interp_ex,
899 elf_entry = load_elf_interp(&interp_elf_ex,
903 if (BAD_ADDR(elf_entry)) {
904 printk(KERN_ERR "Unable to load interpreter\n");
905 send_sig(SIGSEGV, current, 0);
906 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
907 goto out_free_dentry;
909 reloc_func_desc = interp_load_addr;
911 allow_write_access(interpreter);
913 kfree(elf_interpreter);
915 elf_entry = elf_ex.e_entry;
920 if (interpreter_type != INTERPRETER_AOUT)
921 sys_close(elf_exec_fileno);
923 set_binfmt(&elf_format);
926 * Map the vsyscall trampoline. This address is then passed via
929 #ifdef __HAVE_ARCH_VSYSCALL
934 current->flags &= ~PF_FORKNOEXEC;
935 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
936 load_addr, interp_load_addr);
937 /* N.B. passed_fileno might not be initialized? */
938 if (interpreter_type == INTERPRETER_AOUT)
939 current->mm->arg_start += strlen(passed_fileno) + 1;
940 current->mm->end_code = end_code;
941 current->mm->start_code = start_code;
942 current->mm->start_data = start_data;
943 current->mm->end_data = end_data;
944 current->mm->start_stack = bprm->p;
946 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
947 if (current->flags & PF_RELOCEXEC)
948 randomize_brk(elf_brk);
950 if (current->personality & MMAP_PAGE_ZERO) {
951 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
952 and some applications "depend" upon this behavior.
953 Since we do not have the power to recompile these, we
954 emulate the SVr4 behavior. Sigh. */
955 down_write(&current->mm->mmap_sem);
956 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
957 MAP_FIXED | MAP_PRIVATE, 0);
958 up_write(&current->mm->mmap_sem);
963 * The ABI may specify that certain registers be set up in special
964 * ways (on i386 %edx is the address of a DT_FINI function, for
965 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
966 * that the e_entry field is the address of the function descriptor
967 * for the startup routine, rather than the address of the startup
968 * routine itself. This macro performs whatever initialization to
969 * the regs structure is required as well as any relocations to the
970 * function descriptor entries when executing dynamically linked apps.
972 ELF_PLAT_INIT(regs, reloc_func_desc);
975 start_thread(regs, elf_entry, bprm->p);
976 if (unlikely(current->ptrace & PT_PTRACED)) {
977 if (current->ptrace & PT_TRACE_EXEC)
978 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
980 send_sig(SIGTRAP, current, 0);
988 allow_write_access(interpreter);
993 kfree(elf_interpreter);
995 sys_close(elf_exec_fileno);
998 put_files_struct(current->files);
999 current->files = files;
1003 current->flags &= ~PF_RELOCEXEC;
1004 current->flags |= old_relocexec;
1008 /* This is really simpleminded and specialized - we are loading an
1009 a.out library that is given an ELF header. */
1011 static int load_elf_library(struct file *file)
1013 struct elf_phdr *elf_phdata;
1014 unsigned long elf_bss, bss, len;
1015 int retval, error, i, j;
1016 struct elfhdr elf_ex;
1019 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1020 if (retval != sizeof(elf_ex))
1023 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1026 /* First of all, some simple consistency checks */
1027 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1028 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1031 /* Now read in all of the header information */
1033 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1034 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1037 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1042 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
1046 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1047 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1051 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1053 /* Now use mmap to map the library into memory. */
1054 down_write(&current->mm->mmap_sem);
1055 error = do_mmap(file,
1056 ELF_PAGESTART(elf_phdata->p_vaddr),
1057 (elf_phdata->p_filesz +
1058 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1059 PROT_READ | PROT_WRITE | PROT_EXEC,
1060 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1061 (elf_phdata->p_offset -
1062 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1063 up_write(&current->mm->mmap_sem);
1064 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
1067 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1070 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1071 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1073 do_brk(len, bss - len);
1083 * Note that some platforms still use traditional core dumps and not
1084 * the ELF core dump. Each platform can select it as appropriate.
1086 #ifdef USE_ELF_CORE_DUMP
1091 * Modelled on fs/exec.c:aout_core_dump()
1092 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1095 * These are the only things you should do on a core-file: use only these
1096 * functions to write out all the necessary info.
1098 static int dump_write(struct file *file, const void *addr, int nr)
1100 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
1103 static int dump_seek(struct file *file, off_t off)
1105 if (file->f_op->llseek) {
1106 if (file->f_op->llseek(file, off, 0) != off)
1114 * Decide whether a segment is worth dumping; default is yes to be
1115 * sure (missing info is worse than too much; etc).
1116 * Personally I'd include everything, and use the coredump limit...
1118 * I think we should skip something. But I am not sure how. H.J.
1120 static int maydump(struct vm_area_struct *vma)
1123 * If we may not read the contents, don't allow us to dump
1124 * them either. "dump_write()" can't handle it anyway.
1126 if (!(vma->vm_flags & VM_READ))
1129 /* Do not dump I/O mapped devices! -DaveM */
1130 if (vma->vm_flags & VM_IO)
1133 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1135 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
1141 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
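/*
 * Integer round-up to a multiple of y; e.g. roundup(5, 4) == 8 and
 * roundup(8, 4) == 8. Used below to keep note names and descriptors
 * 4-byte aligned, as the ELF note format requires.
 */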
1143 /* An ELF note in memory */
1148 unsigned int datasz;
1152 static int notesize(struct memelfnote *en)
1156 sz = sizeof(struct elf_note);
1157 sz += roundup(strlen(en->name) + 1, 4);
1158 sz += roundup(en->datasz, 4);
1163 #define DUMP_WRITE(addr, nr) \
1164 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1165 #define DUMP_SEEK(off) \
1166 do { if (!dump_seek(file, (off))) return 0; } while(0)
1168 static int writenote(struct memelfnote *men, struct file *file)
1172 en.n_namesz = strlen(men->name) + 1;
1173 en.n_descsz = men->datasz;
1174 en.n_type = men->type;
1176 DUMP_WRITE(&en, sizeof(en));
1177 DUMP_WRITE(men->name, en.n_namesz);
1178 /* XXX - cast from long long to long to avoid need for libgcc.a */
1179 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1180 DUMP_WRITE(men->data, men->datasz);
1181 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
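/*
 * On-disk layout of one note record as emitted above (sizes illustrative):
 *
 *   struct elf_note  { n_namesz, n_descsz, n_type }     e.g. 12 bytes
 *   name bytes ("CORE\0"), padded to a 4-byte boundary  ->  8 bytes
 *   descriptor data, padded to a 4-byte boundary
 *
 * which matches what notesize() adds up; e.g. a hypothetical 32-byte
 * descriptor gives 12 + 8 + 32 == 52 bytes for the whole note.
 */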
1188 #define DUMP_WRITE(addr, nr) \
1189 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1191 #define DUMP_SEEK(off) \
1192 if (!dump_seek(file, (off))) \
1195 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1197 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1198 elf->e_ident[EI_CLASS] = ELF_CLASS;
1199 elf->e_ident[EI_DATA] = ELF_DATA;
1200 elf->e_ident[EI_VERSION] = EV_CURRENT;
1201 elf->e_ident[EI_OSABI] = ELF_OSABI;
1202 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1204 elf->e_type = ET_CORE;
1205 elf->e_machine = ELF_ARCH;
1206 elf->e_version = EV_CURRENT;
1208 elf->e_phoff = sizeof(struct elfhdr);
1211 elf->e_ehsize = sizeof(struct elfhdr);
1212 elf->e_phentsize = sizeof(struct elf_phdr);
1213 elf->e_phnum = segs;
1214 elf->e_shentsize = 0;
1216 elf->e_shstrndx = 0;
1220 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1222 phdr->p_type = PT_NOTE;
1223 phdr->p_offset = offset;
1226 phdr->p_filesz = sz;
1233 static void fill_note(struct memelfnote *note, const char *name, int type,
1234 unsigned int sz, void *data)
1244 * fill up all the fields in prstatus from the given task struct, except registers
1245 * which need to be filled up separately.
1247 static void fill_prstatus(struct elf_prstatus *prstatus,
1248 struct task_struct *p, long signr)
1250 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1251 prstatus->pr_sigpend = p->pending.signal.sig[0];
1252 prstatus->pr_sighold = p->blocked.sig[0];
1253 prstatus->pr_pid = p->pid;
1254 prstatus->pr_ppid = p->parent->pid;
1255 prstatus->pr_pgrp = process_group(p);
1256 prstatus->pr_sid = p->signal->session;
1257 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1258 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1259 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1260 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
1263 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1264 struct mm_struct *mm)
1268 /* first copy the parameters from user space */
1269 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1271 len = mm->arg_end - mm->arg_start;
1272 if (len >= ELF_PRARGSZ)
1273 len = ELF_PRARGSZ-1;
1274 copy_from_user(&psinfo->pr_psargs,
1275 (const char __user *)mm->arg_start, len);
1276 for(i = 0; i < len; i++)
1277 if (psinfo->pr_psargs[i] == 0)
1278 psinfo->pr_psargs[i] = ' ';
1279 psinfo->pr_psargs[len] = 0;
1281 psinfo->pr_pid = p->pid;
1282 psinfo->pr_ppid = p->parent->pid;
1283 psinfo->pr_pgrp = process_group(p);
1284 psinfo->pr_sid = p->signal->session;
1286 i = p->state ? ffz(~p->state) + 1 : 0;
1287 psinfo->pr_state = i;
1288 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1289 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1290 psinfo->pr_nice = task_nice(p);
1291 psinfo->pr_flag = p->flags;
1292 SET_UID(psinfo->pr_uid, p->uid);
1293 SET_GID(psinfo->pr_gid, p->gid);
1294 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1299 /* Here is the structure in which status of each thread is captured. */
1300 struct elf_thread_status
1302 struct list_head list;
1303 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1304 elf_fpregset_t fpu; /* NT_PRFPREG */
1305 #ifdef ELF_CORE_COPY_XFPREGS
1306 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1308 struct memelfnote notes[3];
1313 * In order to add the specific thread information for the elf file format,
1314 * we need to keep a linked list of every thread's pr_status and then
1315 * create a single section for them in the final core file.
1317 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1320 struct elf_thread_status *t;
1323 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1326 memset(t, 0, sizeof(*t));
1328 INIT_LIST_HEAD(&t->list);
1331 fill_prstatus(&t->prstatus, p, signr);
1332 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1334 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1336 sz += notesize(&t->notes[0]);
1338 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1339 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1341 sz += notesize(&t->notes[1]);
1344 #ifdef ELF_CORE_COPY_XFPREGS
1345 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1346 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1348 sz += notesize(&t->notes[2]);
1351 list_add(&t->list, thread_list);
1358 * This is a two-pass process; first we find the offsets of the bits,
1359 * and then they are actually written out. If we run out of core limit
1362 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1370 struct vm_area_struct *vma;
1371 struct elfhdr *elf = NULL;
1372 off_t offset = 0, dataoff;
1373 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1375 struct memelfnote *notes = NULL;
1376 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1377 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1378 struct task_struct *g, *p;
1379 LIST_HEAD(thread_list);
1380 struct list_head *t;
1381 elf_fpregset_t *fpu = NULL;
1382 #ifdef ELF_CORE_COPY_XFPREGS
1383 elf_fpxregset_t *xfpu = NULL;
1385 int thread_status_size = 0;
1389 * We no longer stop all VM operations.
1391 * This is because those processes that could possibly change map_count or
1392 * the mmap / vma pages are now blocked in do_exit on current finishing
1395 * Only ptrace can touch these memory addresses, but it doesn't change
1396 * the map_count or the pages allocated. So no possibility of crashing
1397 * exists while dumping the mm->vm_next areas to the core file.
1400 /* alloc memory for large data structures: too large to be on stack */
1401 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1404 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1407 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1410 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1413 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1416 #ifdef ELF_CORE_COPY_XFPREGS
1417 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1422 /* capture the status of all other threads */
1424 read_lock(&tasklist_lock);
1426 if (current->mm == p->mm && current != p) {
1427 int sz = elf_dump_thread_status(signr, p, &thread_list);
1429 read_unlock(&tasklist_lock);
1432 thread_status_size += sz;
1434 while_each_thread(g,p);
1435 read_unlock(&tasklist_lock);
1438 /* now collect the dump for the current */
1439 memset(prstatus, 0, sizeof(*prstatus));
1440 fill_prstatus(prstatus, current, signr);
1441 elf_core_copy_regs(&prstatus->pr_reg, regs);
1443 segs = current->mm->map_count;
1444 #ifdef ELF_CORE_EXTRA_PHDRS
1445 segs += ELF_CORE_EXTRA_PHDRS;
1449 fill_elf_header(elf, segs+1); /* including notes section */
1452 current->flags |= PF_DUMPCORE;
1455 * Set up the notes in similar form to SVR4 core dumps made
1456 * with info from their /proc.
1459 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1461 fill_psinfo(psinfo, current->group_leader, current->mm);
1462 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1464 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1468 auxv = (elf_addr_t *) current->mm->saved_auxv;
1473 while (auxv[i - 2] != AT_NULL);
1474 fill_note(&notes[numnote++], "CORE", NT_AUXV,
1475 i * sizeof (elf_addr_t), auxv);
1477 /* Try to dump the FPU. */
1478 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1479 fill_note(notes + numnote++,
1480 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1481 #ifdef ELF_CORE_COPY_XFPREGS
1482 if (elf_core_copy_task_xfpregs(current, xfpu))
1483 fill_note(notes + numnote++,
1484 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1490 DUMP_WRITE(elf, sizeof(*elf));
1491 offset += sizeof(*elf); /* Elf header */
1492 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1494 /* Write notes phdr entry */
1496 struct elf_phdr phdr;
1499 for (i = 0; i < numnote; i++)
1500 sz += notesize(notes + i);
1502 sz += thread_status_size;
1504 fill_elf_note_phdr(&phdr, sz, offset);
1506 DUMP_WRITE(&phdr, sizeof(phdr));
1509 /* Page-align dumped data */
1510 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
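/*
 * Rough picture of the core file being assembled (offsets illustrative):
 *
 *   [ ELF header ][ PT_NOTE phdr ][ one PT_LOAD phdr per vma ][ extra phdrs ]
 *   [ note data: NT_PRSTATUS, NT_PRPSINFO, NT_TASKSTRUCT, NT_AUXV, FPU,
 *     plus the per-thread status notes ]
 *   [ padding up to dataoff, an ELF_EXEC_PAGESIZE boundary ]
 *   [ page-aligned memory contents of each vma that maydump() accepted ]
 *
 * dataoff computed above is where the first vma's p_offset will point.
 */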
1512 /* Write program headers for segments dump */
1513 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1514 struct elf_phdr phdr;
1517 sz = vma->vm_end - vma->vm_start;
1519 phdr.p_type = PT_LOAD;
1520 phdr.p_offset = offset;
1521 phdr.p_vaddr = vma->vm_start;
1523 phdr.p_filesz = maydump(vma) ? sz : 0;
1525 offset += phdr.p_filesz;
1526 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1527 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1528 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1529 phdr.p_align = ELF_EXEC_PAGESIZE;
1531 DUMP_WRITE(&phdr, sizeof(phdr));
1534 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1535 ELF_CORE_WRITE_EXTRA_PHDRS;
1538 /* write out the notes section */
1539 for (i = 0; i < numnote; i++)
1540 if (!writenote(notes + i, file))
1543 /* write out the thread status notes section */
1544 list_for_each(t, &thread_list) {
1545 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1546 for (i = 0; i < tmp->num_notes; i++)
1547 if (!writenote(&tmp->notes[i], file))
1553 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1559 for (addr = vma->vm_start;
1561 addr += PAGE_SIZE) {
1563 struct vm_area_struct *vma;
1565 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1566 &page, &vma) <= 0) {
1567 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1569 if (page == ZERO_PAGE(addr)) {
1570 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1573 flush_cache_page(vma, addr);
1575 if ((size += PAGE_SIZE) > limit ||
1576 !dump_write(file, kaddr,
1579 page_cache_release(page);
1584 page_cache_release(page);
1589 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1590 ELF_CORE_WRITE_EXTRA_DATA;
1593 if ((off_t) file->f_pos != offset) {
1595 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1596 (off_t) file->f_pos, offset);
1603 while(!list_empty(&thread_list)) {
1604 struct list_head *tmp = thread_list.next;
1606 kfree(list_entry(tmp, struct elf_thread_status, list));
1614 #ifdef ELF_CORE_COPY_XFPREGS
1621 #endif /* USE_ELF_CORE_DUMP */
1623 static int __init init_elf_binfmt(void)
1625 return register_binfmt(&elf_format);
1628 static void __exit exit_elf_binfmt(void)
1630 /* Remove the COFF and ELF loaders. */
1631 unregister_binfmt(&elf_format);
1634 core_initcall(init_elf_binfmt);
1635 module_exit(exit_elf_binfmt);
1636 MODULE_LICENSE("GPL");