/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/param.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#define elf_addr_t unsigned long
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
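
/*
 * Worked example (editorial illustration, not in the original source):
 * assuming ELF_MIN_ALIGN == 4096 (0x1000), these macros expand as
 *
 *	ELF_PAGESTART(0x08048123)  == 0x08048000   (round down to page)
 *	ELF_PAGEOFFSET(0x08048123) == 0x123        (offset within page)
 *	ELF_PAGEALIGN(0x08048123)  == 0x08049000   (round up to page)
 */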
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x)	((unsigned long)(x) > TASK_SIZE)
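
/*
 * Illustration (editorial, not in the original): on a 32-bit box with
 * TASK_SIZE == 0xc0000000, BAD_ADDR(-ENOMEM) is true because -12 wraps
 * to 0xfffffff4 as an unsigned long, so the single comparison catches
 * both negative error returns and mappings above the user limit.
 */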
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr = do_brk(start, end - start);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static void padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		clear_user((void __user *) elf_bss, nbyte);
	}
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
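
/*
 * Sketch (editorial illustration, not in the original) of the stack
 * image create_elf_tables() builds with these macros for a grows-down
 * stack, from the final stack pointer upward:
 *
 *	sp ->	argc
 *		argv[0] .. argv[argc-1], NULL
 *		envp[0] .. envp[envc-1], NULL
 *		auxv (AT_* id/value pairs), terminated by AT_NULL
 *		argument and environment strings, platform string
 */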
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

#ifdef __HAVE_ARCH_ALIGN_STACK
		p = (unsigned long)arch_align_stack((unsigned long)p);
#endif
		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		__copy_to_user(u_platform, k_platform, len);
	}
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
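/*
 * Illustration (editorial, not in the original): each NEW_AUX_ENT() call
 * appends one flat id/value pair to elf_info[], e.g.
 *
 *	elf_info[0] = AT_PAGESZ; elf_info[1] = 4096;
 *	elf_info[2] = AT_PHNUM;  elf_info[3] = exec->e_phnum;
 *
 * The dynamic linker later walks these pairs until it finds AT_NULL.
 */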
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);
	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	__put_user(argc, sp++);
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(long)argv, sp++);
		__put_user((elf_addr_t)(long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}
	/* Populate argv and envp */
	p = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, argv);
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	__put_user(0, envp);
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
	return 0;
}
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type,
			unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);

	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	down_write(&current->mm->mmap_sem);

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);

	return(map_addr);
}

#endif /* !elf_map */
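
/*
 * Illustration (editorial, not in the original): if the interpreter's
 * first PT_LOAD occupies 0x1000 file-backed bytes but its segments
 * together span total_size == 0x5800 bytes, the first do_mmap() above
 * reserves the full 0x5800 so a randomized base cannot collide with the
 * main binary; [map_addr+0x1000, map_addr+0x5800) is then unmapped, and
 * the remaining segments are later mapped MAP_FIXED into that hole.
 */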
static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++)
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}

	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
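
/*
 * Worked example (editorial, not in the original): two PT_LOAD entries
 * with p_vaddr 0x0000/p_memsz 0x1234 and p_vaddr 0x5000/p_memsz 0x800
 * yield 0x5000 + 0x800 - ELF_PAGESTART(0x0000) = 0x5800 -- the span the
 * first mmap must reserve so that both segments fit.
 */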
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr,
				     unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size)
		goto out_close;
	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size)
		goto out_close;

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		error = do_brk(elf_bss, last_bss - elf_bss);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec * interp_ex,
		struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	do_brk(0, text_data);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
	                   (unsigned long)addr + text_data);

	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
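
/*
 * Note (editorial): INTERPRETER_AOUT and INTERPRETER_ELF double as bit
 * flags below -- the loader starts with both bits set and strips the
 * formats the PT_INTERP file fails to match before settling on one.
 */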
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);
	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size)
		goto out_free_ph;
	files = current->files;		/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);
	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOMEM;
			if (elf_ppnt->p_filesz > PATH_MAX)
				goto out_free_file;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz)
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE)
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	executable_stack = EXSTACK_DEFAULT;

	for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
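
	/*
	 * Illustration (editorial, not in the original): a binary linked
	 * with "gcc -z noexecstack" carries PT_GNU_STACK with
	 * p_flags == PF_R|PF_W, so the loop above picks EXSTACK_DISABLE_X;
	 * linking with "-z execstack" adds PF_X and picks EXSTACK_ENABLE_X.
	 */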
	relocexec = 0;

	if (current->personality == PER_LINUX)
	switch (exec_shield) {
	case 1:
		if (executable_stack != EXSTACK_DEFAULT) {
			current->flags |= PF_RELOCEXEC;
			relocexec = PF_RELOCEXEC;
		}
		break;

	case 2:
		executable_stack = EXSTACK_DISABLE_X;
		current->flags |= PF_RELOCEXEC;
		relocexec = PF_RELOCEXEC;
		break;
	}
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;
	current->flags |= relocexec;

	/*
	 * Turn off the CS limit completely if exec-shield disabled or
	 * NX active:
	 */
	if (!exec_shield || use_nx)
		arch_add_exec_range(current->mm, -1);

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
#ifdef __HAVE_ARCH_MMAP_TOP
	current->mm->mmap_top = mmap_top();
#endif
	current->flags &= ~PF_FORKNOEXEC;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
	current->mm->non_executable_cache = current->mm->mmap_top;
	retval = setup_arg_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void __user *) elf_bss + load_bias, nbyte);
			}
		}
		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set)
			elf_flags |= MAP_FIXED;
		else if (elf_ex.e_type == ET_DYN)
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
		if (BAD_ADDR(error))
			continue;

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
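
		/*
		 * Illustration (editorial, not in the original): for an
		 * ET_DYN binary whose first p_vaddr is 0, load_bias starts
		 * as ELF_PAGESTART(ELF_ET_DYN_BASE) and is then corrected
		 * above by whatever address elf_map() actually returned,
		 * so load_addr ends up at the image's real base address.
		 */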
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	padzero(elf_bss);
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter,
						    &interp_load_addr,
						    load_bias);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = elf_ex.e_entry;
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);
	set_binfmt(&elf_format);

	/*
	 * Map the vsyscall trampoline. This address is then passed via
	 * AT_SYSINFO.
	 */
#ifdef __HAVE_ARCH_VSYSCALL
	map_vsyscall();
#endif

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef __HAVE_ARCH_RANDOMIZE_BRK
	if (current->flags & PF_RELOCEXEC)
		randomize_brk(elf_brk);
#endif
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	return retval;
	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	current->flags &= ~PF_RELOCEXEC;
	current->flags |= old_relocexec;
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */

static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata, *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD) j++;
	if (j != 1)
		goto out_free_ph;
	/* Walk with a copy, so out_free_ph below still frees the
	   original allocation even when PT_LOAD is not first. */
	eppnt = elf_phdata;
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	padzero(elf_bss);

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len)
		do_brk(len, bss - len);
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */

#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, off_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/*
	 * If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ))
		return 0;

	/* Do not dump I/O mapped devices! -DaveM */
	if (vma->vm_flags & VM_IO)
		return 0;

	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;

	return 1;
}
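
/*
 * Illustration (editorial, not in the original): a private writable
 * heap vma (VM_READ|VM_WRITE) is dumped; a read-only file-backed text
 * mapping (VM_READ|VM_EXEC|VM_EXECUTABLE) is skipped, since it can be
 * recovered from the executable itself.
 */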
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
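
/*
 * Worked example (editorial illustration): for a "CORE"/NT_PRSTATUS note
 * whose descriptor is 144 bytes on a typical 32-bit layout, notesize()
 * is sizeof(struct elf_note) + roundup(5, 4) + roundup(144, 4)
 * = 12 + 8 + 144 = 164 bytes.
 */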
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
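
/*
 * On-disk note record written above (editorial illustration):
 *
 *	Elf_Nhdr { n_namesz, n_descsz, n_type }
 *	name bytes ("CORE\0"), padded to a 4-byte boundary
 *	descriptor bytes,      padded to a 4-byte boundary
 */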
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = 0;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	jiffies_to_timeval(p->utime, &prstatus->pr_utime);
	jiffies_to_timeval(p->stime, &prstatus->pr_stime);
	jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
	jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
}
static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
			struct mm_struct *mm)
{
	int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	copy_from_user(&psinfo->pr_psargs,
		       (const char __user *)mm->arg_start, len);
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return;
}
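
/*
 * Illustration (editorial, not in the original): for "ls -l /tmp" the
 * argument block "ls\0-l\0/tmp\0" becomes pr_psargs == "ls -l /tmp"
 * after the NUL-to-space rewrite above.
 */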
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
{
	int sz = 0;
	struct elf_thread_status *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return 0;
	memset(t, 0, sizeof(*t));

	INIT_LIST_HEAD(&t->list);
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	list_add(&t->list, thread_list);
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	/* capture the status of all other threads */
	if (signr) {
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				int sz = elf_dump_thread_status(signr, p, &thread_list);
				if (!sz) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				} else
					thread_status_size += sz;
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);
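
	/*
	 * Illustration (editorial, not in the original): if saved_auxv
	 * holds { AT_PAGESZ, 4096, AT_ENTRY, 0x8048100, AT_NULL, 0 },
	 * the loop above leaves i == 6, so the whole array, including
	 * the terminating AT_NULL pair, lands in the NT_AUXV note.
	 */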
	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */
	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
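
	/*
	 * Resulting core file layout (editorial illustration):
	 *
	 *	ELF header
	 *	(segs+1) program headers: PT_NOTE first, one PT_LOAD per vma
	 *	note data (prstatus, psinfo, auxv, fpu, per-thread status)
	 *	<padding to ELF_EXEC_PAGESIZE>
	 *	page-aligned dumps of each vma's contents
	 */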
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK (file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK (file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr);
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}
#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while(!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");