2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
41 #include <asm/uaccess.h>
42 #include <asm/param.h>
43 #include <asm/pgalloc.h>
45 #include <linux/elf.h>
/* Forward declarations for the three binfmt entry points and the arch fpu
 * dumper.  NOTE(review): this listing is a sampled extract — lines are
 * missing throughout and each line carries its original line number as a
 * leading token; do not expect it to compile as-is. */
47 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
48 static int load_elf_library(struct file*);
49 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
50 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Stack entries (argc, argv/envp pointers, auxv) are native words. */
53 #define elf_addr_t unsigned long
/* Core dumping is optional per-arch: supply the real dumper or NULL. */
57 * If we don't support core dumping, then supply a NULL so we
60 #ifdef USE_ELF_CORE_DUMP
61 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
63 #define elf_core_dump NULL
/* ELF segment granularity: the larger of the ELF exec page size and the
 * kernel PAGE_SIZE.  The three helpers below round to that granularity. */
66 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
67 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
69 # define ELF_MIN_ALIGN PAGE_SIZE
72 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
73 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
74 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/* Registration record handed to the binfmt core: ties the ELF loader,
 * shared-library loader and core dumper together.
 * NOTE(review): the closing "};" of this initializer is missing from this
 * sampled listing. */
76 static struct linux_binfmt elf_format = {
77 .module = THIS_MODULE,
78 .load_binary = load_elf_binary,
79 .load_shlib = load_elf_library,
80 .core_dump = elf_core_dump,
81 .min_coredump = ELF_EXEC_PAGESIZE
/* An address is "bad" if it is past the end of user space (also catches
 * the negative error values returned by do_mmap, cast to unsigned). */
84 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
/* set_brk() - extend the process heap so that [start, end) is mapped as
 * anonymous zero pages (used for bss/brk setup), then record the new brk
 * in the mm.  Both bounds are rounded to ELF_MIN_ALIGN first.
 * NOTE(review): the error check on do_brk()'s return and the function's
 * braces/return are missing from this sampled listing. */
86 static int set_brk(unsigned long start, unsigned long end)
88 start = ELF_PAGEALIGN(start);
89 end = ELF_PAGEALIGN(end);
91 unsigned long addr = do_brk(start, end - start);
95 current->mm->start_brk = current->mm->brk = end;
100 /* We need to explicitly zero any fractional pages
101 after the data section (i.e. bss). This would
102 contain the junk from the file that should not
/* padzero() - zero the tail of the page containing elf_bss, so stale file
 * bytes mapped past p_filesz are not visible to the process.
 * NOTE(review): the "if (nbyte)" guard around the clear appears to be
 * missing from this sampled listing; clear_user()'s return value is
 * ignored in the original too. */
106 static void padzero(unsigned long elf_bss)
110 nbyte = ELF_PAGEOFFSET(elf_bss);
112 nbyte = ELF_MIN_ALIGN - nbyte;
113 clear_user((void __user *) elf_bss, nbyte);
117 /* Let's use some macros to make this stack manipulation a litle clearer */
/* Two variants: on STACK_GROWSUP arches (e.g. PA-RISC) the stack grows
 * toward higher addresses, so "allocating" moves sp up; everywhere else
 * it moves down.  STACK_ROUND aligns the resulting sp to 16 bytes. */
118 #ifdef CONFIG_STACK_GROWSUP
119 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
120 #define STACK_ROUND(sp, items) \
121 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
122 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
/* NOTE(review): the "#else" separating the two variants is missing from
 * this sampled listing. */
124 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
125 #define STACK_ROUND(sp, items) \
126 (((unsigned long) (sp - items)) &~ 15UL)
127 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
/* create_elf_tables() - lay out the initial user stack for a freshly
 * exec'd ELF image: platform string, auxiliary vector (built in
 * mm->saved_auxv, copied last), argc, argv[], envp[].  interp_aout says
 * the interpreter is a.out-format, which needs extra argv/envp slots.
 * NOTE(review): this listing is heavily sampled — the return type,
 * braces, several #ifdef/#else lines, loop headers for the argv/envp
 * copies, and the final return are missing. */
131 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
132 int interp_aout, unsigned long load_addr,
133 unsigned long interp_load_addr)
135 unsigned long p = bprm->p;
136 int argc = bprm->argc;
137 int envc = bprm->envc;
138 elf_addr_t __user *argv;
139 elf_addr_t __user *envp;
140 elf_addr_t __user *sp;
141 elf_addr_t __user *u_platform;
142 const char *k_platform = ELF_PLATFORM;
144 elf_addr_t *elf_info;
146 struct task_struct *tsk = current;
/* Copy the arch platform string (e.g. "i686") to the user stack so the
 * dynamic linker can read it via AT_PLATFORM. */
149 * If this architecture has a platform capability string, copy it
150 * to userspace. In some cases (Sparc), this info is impossible
151 * for userspace to get any other way, in others (i386) it is
157 size_t len = strlen(k_platform) + 1;
159 #ifdef __HAVE_ARCH_ALIGN_STACK
160 p = (unsigned long)arch_align_stack((unsigned long)p);
162 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
163 __copy_to_user(u_platform, k_platform, len);
/* Build the auxv as (id, value) pairs in kernel memory first; it is
 * copied onto the user stack at the bottom of this function. */
166 /* Create the ELF interpreter info */
167 elf_info = (elf_addr_t *) current->mm->saved_auxv;
168 #define NEW_AUX_ENT(id, val) \
169 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
173 * ARCH_DLINFO must come first so PPC can do its special alignment of
178 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
179 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
180 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
181 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
182 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
183 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
184 NEW_AUX_ENT(AT_BASE, interp_load_addr);
185 NEW_AUX_ENT(AT_FLAGS, 0);
186 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
187 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
188 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
189 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
190 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
/* AT_SECURE tells ld.so to ignore LD_PRELOAD etc. for setuid images. */
191 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
193 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(long)u_platform);
196 /* AT_NULL is zero; clear the rest too */
197 memset(&elf_info[ei_index], 0,
198 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
200 /* And advance past the AT_NULL entry. */
/* Reserve room below the auxv for argc + argv/envp pointer arrays, then
 * 16-byte align the final stack pointer. */
203 sp = STACK_ADD(p, ei_index);
205 items = (argc + 1) + (envc + 1);
207 items += 3; /* a.out interpreters require argv & envp too */
209 items += 1; /* ELF interpreters only put argc on the stack */
211 bprm->p = STACK_ROUND(sp, items);
213 /* Point sp at the lowest address on the stack */
214 #ifdef CONFIG_STACK_GROWSUP
215 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
216 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
218 sp = (elf_addr_t __user *)bprm->p;
221 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
222 __put_user(argc, sp++);
/* a.out-interpreter case: also write the argv/envp array addresses. */
225 envp = argv + argc + 1;
226 __put_user((elf_addr_t)(long)argv, sp++);
227 __put_user((elf_addr_t)(long)envp, sp++);
230 envp = argv + argc + 1;
/* Walk the strings already copied to the stack by copy_strings(),
 * recording a pointer to each into argv[]/envp[]. */
233 /* Populate argv and envp */
234 p = current->mm->arg_start;
237 __put_user((elf_addr_t)p, argv++);
238 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
239 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
244 current->mm->arg_end = current->mm->env_start = p;
247 __put_user((elf_addr_t)p, envp++);
248 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
249 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
254 current->mm->env_end = p;
256 /* Put the elf_info on the stack in the right place. */
257 sp = (elf_addr_t __user *)envp + 1;
258 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
/* elf_map() - mmap one PT_LOAD segment of an ELF file at (roughly) addr,
 * extending the request so file offset and vaddr share the same page
 * offset.  total_size != 0 means "first mapping of the whole image":
 * reserve the full span, then trim the excess, so randomized placement
 * cannot land a later segment on top of another mapping.
 * Returns the kernel-chosen map address (check with BAD_ADDR). */
263 static unsigned long elf_map(struct file *filep, unsigned long addr,
264 struct elf_phdr *eppnt, int prot, int type,
265 unsigned long total_size)
267 unsigned long map_addr;
268 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
269 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
271 addr = ELF_PAGESTART(addr);
272 size = ELF_PAGEALIGN(size);
/* NOTE(review): "¤t" below is a mangled "&current" (HTML entity
 * &curren;) — restore before compiling.  Same defect recurs elsewhere in
 * this listing. */
274 down_write(&current->mm->mmap_sem);
/* NOTE(review): the "if (total_size) { ... } else" structure around the
 * two do_mmap() calls is missing from this sampled listing. */
277 * total_size is the size of the ELF (interpreter) image.
278 * The _first_ mmap needs to know the full size, otherwise
279 * randomization might put this image into an overlapping
280 * position with the ELF binary image. (since size < total_size)
281 * So we first map the 'big' image - and unmap the remainder at
282 * the end. (which unmap is needed for ELF images with holes.)
285 total_size = ELF_PAGEALIGN(total_size);
286 map_addr = do_mmap(filep, addr, total_size, prot, type, off);
287 if (!BAD_ADDR(map_addr))
288 do_munmap(current->mm, map_addr+size, total_size-size);
290 map_addr = do_mmap(filep, addr, size, prot, type, off);
292 up_write(&current->mm->mmap_sem);
297 #endif /* !elf_map */
/* total_mapping_size() - span, in bytes, from the page-start of the first
 * PT_LOAD segment to the end (p_vaddr + p_memsz) of the last one; used as
 * the total_size argument to elf_map() above.
 * NOTE(review): the loop body recording first_idx/last_idx and the
 * "no PT_LOAD found" early return are missing from this sampled listing. */
299 static inline unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
301 int i, first_idx = -1, last_idx = -1;
303 for (i = 0; i < nr; i++)
304 if (cmds[i].p_type == PT_LOAD) {
313 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
314 ELF_PAGESTART(cmds[first_idx].p_vaddr);
317 /* This is much more generalized than the library routine read function,
318 so we keep this separate. Technically the library read function
319 is only provided so that we can read a.out libraries that have
/* load_elf_interp() - map the ELF dynamic linker (PT_INTERP target) into
 * the current mm.  On success returns the interpreter's entry point
 * (relocated by load_addr for ET_DYN) and stores the load bias in
 * *interp_load_addr; on failure returns an error cast to unsigned long
 * (caller checks with BAD_ADDR).  no_base is the minimum address to use
 * for an ET_DYN interpreter when nothing is mapped yet.
 * NOTE(review): sampled listing — error-path labels, several braces and
 * the kfree of elf_phdata are missing. */
322 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
323 struct file * interpreter,
324 unsigned long *interp_load_addr,
325 unsigned long no_base)
327 struct elf_phdr *elf_phdata;
328 struct elf_phdr *eppnt;
329 unsigned long load_addr = 0;
330 int load_addr_set = 0;
331 unsigned long last_bss = 0, elf_bss = 0;
332 unsigned long error = ~0UL;
333 unsigned long total_size;
336 /* First of all, some simple consistency checks */
337 if (interp_elf_ex->e_type != ET_EXEC &&
338 interp_elf_ex->e_type != ET_DYN)
340 if (!elf_check_arch(interp_elf_ex))
342 if (!interpreter->f_op || !interpreter->f_op->mmap)
346 * If the size of this structure has changed, then punt, since
347 * we will be doing the wrong thing.
349 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the kmalloc below cannot be driven past 64KB. */
351 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
354 /* Now read in all of the header information */
356 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
357 if (size > ELF_MIN_ALIGN)
359 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
363 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
368 total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
/* Map every PT_LOAD segment; first mapping passes total_size so the
 * whole image is reserved at once (see elf_map). */
373 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
374 if (eppnt->p_type == PT_LOAD) {
375 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
377 unsigned long vaddr = 0;
378 unsigned long k, map_addr;
380 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
381 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
382 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
383 vaddr = eppnt->p_vaddr;
384 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
385 elf_type |= MAP_FIXED;
386 else if (no_base && interp_elf_ex->e_type == ET_DYN)
389 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type, total_size);
392 if (BAD_ADDR(map_addr))
/* First ET_DYN segment fixes the load bias for all later ones. */
395 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
396 load_addr = map_addr - ELF_PAGESTART(vaddr);
401 * Check to see if the section's size will overflow the
402 * allowed task size. Note that p_filesz must always be
403 * <= p_memsize so it is only necessary to check p_memsz.
405 k = load_addr + eppnt->p_vaddr;
406 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
407 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
413 * Find the end of the file mapping for this phdr, and keep
414 * track of the largest address we see for this.
416 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
421 * Do the same thing for the memory mapping - between
422 * elf_bss and last_bss is the bss section.
424 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
431 * Now fill out the bss section. First pad the last page up
432 * to the page boundary, and then perform a mmap to make sure
433 * that there are zero-mapped pages up to and including the
437 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
439 /* Map the last of the bss segment */
440 if (last_bss > elf_bss) {
441 error = do_brk(elf_bss, last_bss - elf_bss);
446 *interp_load_addr = load_addr;
447 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/* load_aout_interp() - legacy path: load an a.out-format dynamic linker
 * by read()ing its text+data into the brk area.  Returns the a.out entry
 * point, or ~0UL on failure.
 * NOTE(review): sampled listing — the switch's OMAGIC/other case labels,
 * braces and the error "goto"s are missing; error checking on the first
 * do_brk() is absent in the original as well. */
455 static unsigned long load_aout_interp(struct exec * interp_ex,
456 struct file * interpreter)
458 unsigned long text_data, elf_entry = ~0UL;
462 current->mm->end_code = interp_ex->a_text;
463 text_data = interp_ex->a_text + interp_ex->a_data;
464 current->mm->end_data = text_data;
465 current->mm->brk = interp_ex->a_bss + text_data;
/* OMAGIC loads from file offset 0 at address 0; ZMAGIC/QMAGIC use the
 * canonical a.out text offset and address. */
467 switch (N_MAGIC(*interp_ex)) {
470 addr = (char __user *)0;
474 offset = N_TXTOFF(*interp_ex);
475 addr = (char __user *) N_TXTADDR(*interp_ex);
481 do_brk(0, text_data);
482 if (!interpreter->f_op || !interpreter->f_op->read)
484 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
486 flush_icache_range((unsigned long)addr,
487 (unsigned long)addr + text_data);
/* Reserve the interpreter's bss after its text+data. */
489 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
491 elf_entry = interp_ex->a_entry;
498 * These are the functions used to load ELF style executables and shared
499 * libraries. There is no binary dependent code anywhere else.
/* Bit flags describing what kind of PT_INTERP interpreter was found;
 * ELF|AOUT is used transiently while the format is still ambiguous. */
502 #define INTERPRETER_NONE 0
503 #define INTERPRETER_AOUT 1
504 #define INTERPRETER_ELF 2
/* load_elf_binary() - the main execve() entry point for ELF images.
 * Phases: validate the ELF header; read program headers; find and open
 * the PT_INTERP interpreter; commit via flush_old_exec() (point of no
 * return); mmap all PT_LOAD segments; set up brk/bss; load the
 * interpreter; build the user stack (create_elf_tables); start the
 * thread at the entry point.  Returns 0 on success or -errno (only
 * meaningful before the point of no return).
 * NOTE(review): sampled listing — many braces, "else" lines, error
 * labels (out_free_ph/out_free_fh/out_free_interp/out_free_dentry) and
 * intermediate statements are missing; comments below describe intent,
 * not a compilable body. */
507 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
509 struct file *interpreter = NULL; /* to shut gcc up */
510 unsigned long load_addr = 0, load_bias = 0;
511 int load_addr_set = 0;
512 char * elf_interpreter = NULL;
513 unsigned int interpreter_type = INTERPRETER_NONE;
514 unsigned char ibcs2_interpreter = 0;
516 struct elf_phdr * elf_ppnt, *elf_phdata;
517 unsigned long elf_bss, elf_brk;
521 unsigned long elf_entry, interp_load_addr = 0;
522 unsigned long start_code, end_code, start_data, end_data;
523 unsigned long reloc_func_desc = 0;
524 struct elfhdr elf_ex;
525 struct elfhdr interp_elf_ex;
526 struct exec interp_ex;
527 char passed_fileno[6];
528 struct files_struct *files;
529 int executable_stack, relocexec, old_relocexec = current->flags & PF_RELOCEXEC;
531 /* Get the exec-header */
532 elf_ex = *((struct elfhdr *) bprm->buf);
535 /* First of all, some simple consistency checks */
536 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
539 if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
541 if (!elf_check_arch(&elf_ex))
543 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
546 /* Now read in all of the header information */
549 if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
/* Same 64KB kmalloc bound as load_elf_interp. */
551 if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
553 size = elf_ex.e_phnum * sizeof(struct elf_phdr);
554 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
558 retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
/* Unshare the files_struct early so the a.out fd trick below cannot
 * leak into a sibling thread. */
562 files = current->files; /* Refcounted so ok */
563 retval = unshare_files();
566 if (files == current->files) {
567 put_files_struct(files);
571 /* exec will make our files private anyway, but for the a.out
572 loader stuff we need to do it earlier */
574 retval = get_unused_fd();
577 get_file(bprm->file);
578 fd_install(elf_exec_fileno = retval, bprm->file);
580 elf_ppnt = elf_phdata;
/* Pass 1 over the phdrs: find PT_INTERP and read the interpreter path. */
589 for (i = 0; i < elf_ex.e_phnum; i++) {
590 if (elf_ppnt->p_type == PT_INTERP) {
591 /* This is the program interpreter used for
592 * shared libraries - for now assume that this
593 * is an a.out format binary
597 if (elf_ppnt->p_filesz > PATH_MAX)
599 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
601 if (!elf_interpreter)
604 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
608 goto out_free_interp;
609 /* If the program interpreter is one of these two,
610 * then assume an iBCS2 image. Otherwise assume
611 * a native linux image.
613 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
614 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
615 ibcs2_interpreter = 1;
618 * The early SET_PERSONALITY here is so that the lookup
619 * for the interpreter happens in the namespace of the
620 * to-be-execed image. SET_PERSONALITY can select an
623 * However, SET_PERSONALITY is NOT allowed to switch
624 * this task into the new images's memory mapping
625 * policy - that is, TASK_SIZE must still evaluate to
626 * that which is appropriate to the execing application.
627 * This is because exit_mmap() needs to have TASK_SIZE
628 * evaluate to the size of the old image.
630 * So if (say) a 64-bit application is execing a 32-bit
631 * application it is the architecture's responsibility
632 * to defer changing the value of TASK_SIZE until the
633 * switch really is going to happen - do this in
634 * flush_thread(). - akpm
636 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
638 interpreter = open_exec(elf_interpreter);
639 retval = PTR_ERR(interpreter);
640 if (IS_ERR(interpreter))
641 goto out_free_interp;
642 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
644 goto out_free_dentry;
/* bprm->buf now holds the interpreter's header; snapshot it both ways
 * since we don't yet know whether it is a.out or ELF. */
646 /* Get the exec headers */
647 interp_ex = *((struct exec *) bprm->buf);
648 interp_elf_ex = *((struct elfhdr *) bprm->buf);
/* Pass 2: honor PT_GNU_STACK's executable-stack request. */
654 elf_ppnt = elf_phdata;
655 executable_stack = EXSTACK_DEFAULT;
657 for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
658 if (elf_ppnt->p_type == PT_GNU_STACK) {
659 if (elf_ppnt->p_flags & PF_X)
660 executable_stack = EXSTACK_ENABLE_X;
662 executable_stack = EXSTACK_DISABLE_X;
/* exec-shield policy (patched kernel): PF_RELOCEXEC requests relocated,
 * randomized mappings; mode 2 forces the stack non-executable. */
668 if (current->personality == PER_LINUX)
669 switch (exec_shield) {
671 if (executable_stack != EXSTACK_DEFAULT) {
672 current->flags |= PF_RELOCEXEC;
673 relocexec = PF_RELOCEXEC;
678 executable_stack = EXSTACK_DISABLE_X;
679 current->flags |= PF_RELOCEXEC;
680 relocexec = PF_RELOCEXEC;
684 /* Some simple consistency checks for the interpreter */
685 if (elf_interpreter) {
686 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
688 /* Now figure out which format our binary is */
689 if ((N_MAGIC(interp_ex) != OMAGIC) &&
690 (N_MAGIC(interp_ex) != ZMAGIC) &&
691 (N_MAGIC(interp_ex) != QMAGIC))
692 interpreter_type = INTERPRETER_ELF;
694 if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
695 interpreter_type &= ~INTERPRETER_ELF;
698 if (!interpreter_type)
699 goto out_free_dentry;
701 /* Make sure only one type was selected */
702 if ((interpreter_type & INTERPRETER_ELF) &&
703 interpreter_type != INTERPRETER_ELF) {
704 // FIXME - ratelimit this before re-enabling
705 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
706 interpreter_type = INTERPRETER_ELF;
708 /* Verify the interpreter has a valid arch */
709 if ((interpreter_type == INTERPRETER_ELF) &&
710 !elf_check_arch(&interp_elf_ex))
711 goto out_free_dentry;
713 /* Executables without an interpreter also need a personality */
714 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
717 /* OK, we are done with that, now set up the arg stuff,
718 and then start this sucker up */
/* a.out interpreters receive the exec fd number as an extra argv[0]. */
720 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
721 char *passed_p = passed_fileno;
722 sprintf(passed_fileno, "%d", elf_exec_fileno);
724 if (elf_interpreter) {
725 retval = copy_strings_kernel(1, &passed_p, bprm);
727 goto out_free_dentry;
/* ---- point of no return: flush_old_exec() destroys the old image;
 * failures after here must SIGKILL/SIGSEGV rather than return. ---- */
732 /* Flush all traces of the currently running executable */
733 retval = flush_old_exec(bprm);
735 goto out_free_dentry;
736 current->flags |= relocexec;
740 * In the exec-shield-disabled case turn off the CS limit
744 arch_add_exec_range(current->mm, -1);
747 /* Discard our unneeded old files struct */
750 put_files_struct(files);
754 /* OK, This is the point of no return */
755 current->mm->start_data = 0;
756 current->mm->end_data = 0;
757 current->mm->end_code = 0;
758 current->mm->mmap = NULL;
759 #ifdef __HAVE_ARCH_MMAP_TOP
760 current->mm->mmap_top = mmap_top();
762 current->flags &= ~PF_FORKNOEXEC;
764 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
765 may depend on the personality. */
766 SET_PERSONALITY(elf_ex, ibcs2_interpreter);
768 /* Do this so that we can load the interpreter, if need be. We will
769 change some of these later */
770 // current->mm->rss = 0;
771 vx_rsspages_sub(current->mm, current->mm->rss);
772 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
773 current->mm->non_executable_cache = current->mm->mmap_top;
774 retval = setup_arg_pages(bprm, executable_stack);
776 send_sig(SIGKILL, current, 0);
777 goto out_free_dentry;
780 current->mm->start_stack = bprm->p;
783 /* Now we do a little grungy work by mmaping the ELF image into
784 the correct location in memory.
/* Pass 3: map every PT_LOAD segment of the main binary. */
787 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
788 int elf_prot = 0, elf_flags;
789 unsigned long k, vaddr;
791 if (elf_ppnt->p_type != PT_LOAD)
794 if (unlikely (elf_brk > elf_bss)) {
797 /* There was a PT_LOAD segment with p_memsz > p_filesz
798 before this one. Map anonymous pages, if needed,
799 and clear the area. */
800 retval = set_brk (elf_bss + load_bias,
801 elf_brk + load_bias);
803 send_sig(SIGKILL, current, 0);
804 goto out_free_dentry;
806 nbyte = ELF_PAGEOFFSET(elf_bss);
808 nbyte = ELF_MIN_ALIGN - nbyte;
809 if (nbyte > elf_brk - elf_bss)
810 nbyte = elf_brk - elf_bss;
811 clear_user((void __user *) elf_bss + load_bias, nbyte);
815 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
816 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
817 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
819 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
821 vaddr = elf_ppnt->p_vaddr;
822 if (elf_ex.e_type == ET_EXEC || load_addr_set)
823 elf_flags |= MAP_FIXED;
824 else if (elf_ex.e_type == ET_DYN)
/* ET_DYN (PIE/shared object run directly): base it at ELF_ET_DYN_BASE. */
828 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
831 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0);
835 if (!load_addr_set) {
837 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
838 if (elf_ex.e_type == ET_DYN) {
840 ELF_PAGESTART(load_bias + vaddr);
841 load_addr += load_bias;
842 reloc_func_desc = load_bias;
845 k = elf_ppnt->p_vaddr;
846 if (k < start_code) start_code = k;
847 if (start_data < k) start_data = k;
850 * Check to see if the section's size will overflow the
851 * allowed task size. Note that p_filesz must always be
852 * <= p_memsz so it is only necessary to check p_memsz.
854 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
855 elf_ppnt->p_memsz > TASK_SIZE ||
856 TASK_SIZE - elf_ppnt->p_memsz < k) {
857 /* set_brk can never work. Avoid overflows. */
858 send_sig(SIGKILL, current, 0);
859 goto out_free_dentry;
862 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
866 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
870 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* Relocate all bookkeeping addresses by the chosen load bias. */
875 elf_ex.e_entry += load_bias;
876 elf_bss += load_bias;
877 elf_brk += load_bias;
878 start_code += load_bias;
879 end_code += load_bias;
880 start_data += load_bias;
881 end_data += load_bias;
883 /* Calling set_brk effectively mmaps the pages that we need
884 * for the bss and break sections. We must do this before
885 * mapping in the interpreter, to make sure it doesn't wind
886 * up getting placed where the bss needs to go.
888 retval = set_brk(elf_bss, elf_brk);
890 send_sig(SIGKILL, current, 0);
891 goto out_free_dentry;
/* Load whichever interpreter format was detected; elf_entry becomes the
 * address userspace starts at (interpreter entry, or e_entry if none). */
895 if (elf_interpreter) {
896 if (interpreter_type == INTERPRETER_AOUT)
897 elf_entry = load_aout_interp(&interp_ex,
900 elf_entry = load_elf_interp(&interp_elf_ex,
904 if (BAD_ADDR(elf_entry)) {
905 printk(KERN_ERR "Unable to load interpreter\n");
906 send_sig(SIGSEGV, current, 0);
907 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
908 goto out_free_dentry;
910 reloc_func_desc = interp_load_addr;
912 allow_write_access(interpreter);
914 kfree(elf_interpreter);
916 elf_entry = elf_ex.e_entry;
/* a.out interpreters keep elf_exec_fileno open for their own use. */
921 if (interpreter_type != INTERPRETER_AOUT)
922 sys_close(elf_exec_fileno);
924 set_binfmt(&elf_format);
927 * Map the vsyscall trampoline. This address is then passed via
930 #ifdef __HAVE_ARCH_VSYSCALL
935 current->flags &= ~PF_FORKNOEXEC;
936 create_elf_tables(bprm, &elf_ex, (interpreter_type == INTERPRETER_AOUT),
937 load_addr, interp_load_addr);
938 /* N.B. passed_fileno might not be initialized? */
939 if (interpreter_type == INTERPRETER_AOUT)
940 current->mm->arg_start += strlen(passed_fileno) + 1;
941 current->mm->end_code = end_code;
942 current->mm->start_code = start_code;
943 current->mm->start_data = start_data;
944 current->mm->end_data = end_data;
945 current->mm->start_stack = bprm->p;
947 #ifdef __HAVE_ARCH_RANDOMIZE_BRK
948 if (current->flags & PF_RELOCEXEC)
949 randomize_brk(elf_brk);
951 if (current->personality & MMAP_PAGE_ZERO) {
952 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
953 and some applications "depend" upon this behavior.
954 Since we do not have the power to recompile these, we
955 emulate the SVr4 behavior. Sigh. */
/* NOTE(review): "¤t" on the two lock lines below is a mangled
 * "&current" — restore before compiling. */
956 down_write(&current->mm->mmap_sem);
957 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
958 MAP_FIXED | MAP_PRIVATE, 0);
959 up_write(&current->mm->mmap_sem);
964 * The ABI may specify that certain registers be set up in special
965 * ways (on i386 %edx is the address of a DT_FINI function, for
966 * example. In addition, it may also specify (eg, PowerPC64 ELF)
967 * that the e_entry field is the address of the function descriptor
968 * for the startup routine, rather than the address of the startup
969 * routine itself. This macro performs whatever initialization to
970 * the regs structure is required as well as any relocations to the
971 * function descriptor entries when executing dynamically links apps.
973 ELF_PLAT_INIT(regs, reloc_func_desc);
976 start_thread(regs, elf_entry, bprm->p);
977 if (unlikely(current->ptrace & PT_PTRACED)) {
978 if (current->ptrace & PT_TRACE_EXEC)
979 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
981 send_sig(SIGTRAP, current, 0);
/* ---- error unwinding (labels missing from this sampled listing) ---- */
989 allow_write_access(interpreter);
994 kfree(elf_interpreter);
996 sys_close(elf_exec_fileno);
999 put_files_struct(current->files);
1000 current->files = files;
1004 current->flags &= ~PF_RELOCEXEC;
1005 current->flags |= old_relocexec;
1009 /* This is really simpleminded and specialized - we are loading an
1010 a.out library that is given an ELF header. */
/* load_elf_library() - uselib(2) support: map the single PT_LOAD segment
 * of an ELF-wrapped a.out-style library at its fixed vaddr, then extend
 * brk for its bss.  Returns 0 on success, -errno otherwise.
 * NOTE(review): sampled listing — error labels, the j==1 check and the
 * final return are missing; "¤t" below is a mangled "&current". */
1012 static int load_elf_library(struct file *file)
1014 struct elf_phdr *elf_phdata;
1015 unsigned long elf_bss, bss, len;
1016 int retval, error, i, j;
1017 struct elfhdr elf_ex;
1020 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
1021 if (retval != sizeof(elf_ex))
1024 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1027 /* First of all, some simple consistency checks */
1028 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1029 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
1032 /* Now read in all of the header information */
1034 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1035 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1038 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
1043 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
/* Exactly one PT_LOAD segment is allowed (j counts them). */
1047 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1048 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
1052 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
1054 /* Now use mmap to map the library into memory. */
1055 down_write(&current->mm->mmap_sem);
1056 error = do_mmap(file,
1057 ELF_PAGESTART(elf_phdata->p_vaddr),
1058 (elf_phdata->p_filesz +
1059 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
1060 PROT_READ | PROT_WRITE | PROT_EXEC,
1061 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1062 (elf_phdata->p_offset -
1063 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1064 up_write(&current->mm->mmap_sem);
1065 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
/* Zero the partial page after p_filesz, then brk-extend for the bss. */
1068 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1071 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1072 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1074 do_brk(len, bss - len);
1084 * Note that some platforms still use traditional core dumps and not
1085 * the ELF core dump. Each platform can select it as appropriate.
1087 #ifdef USE_ELF_CORE_DUMP
1092 * Modelled on fs/exec.c:aout_core_dump()
1093 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1096 * These are the only things you should do on a core-file: use only these
1097 * functions to write out all the necessary info.
/* dump_write() - write nr bytes to the core file; true iff fully written. */
1099 static int dump_write(struct file *file, const void *addr, int nr)
1101 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
/* dump_seek() - seek to absolute offset off; falls back (in the elided
 * else branch of this sampled listing) when the fs has no llseek. */
1104 static int dump_seek(struct file *file, off_t off)
1106 if (file->f_op->llseek) {
1107 if (file->f_op->llseek(file, off, 0) != off)
1115 * Decide whether a segment is worth dumping; default is yes to be
1116 * sure (missing info is worse than too much; etc).
1117 * Personally I'd include everything, and use the coredump limit...
1119 * I think we should skip something. But I am not sure how. H.J.
/* maydump() - policy filter for which VMAs go into the core dump:
 * unreadable and I/O-mapped regions are always skipped; the two flag
 * tests at the bottom pick dumpable private/shared regions (their
 * return statements are elided in this sampled listing). */
1121 static int maydump(struct vm_area_struct *vma)
1124 * If we may not read the contents, don't allow us to dump
1125 * them either. "dump_write()" can't handle it anyway.
1127 if (!(vma->vm_flags & VM_READ))
1130 /* Do not dump I/O mapped devices! -DaveM */
1131 if (vma->vm_flags & VM_IO)
1134 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1136 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
/* Round x up to the next multiple of y (y need not be a power of two). */
1142 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1144 /* An ELF note in memory */
/* NOTE(review): the struct memelfnote definition is mostly elided here;
 * only its datasz member is visible. */
1149 unsigned int datasz;
/* notesize() - on-disk size of one note: header + padded name + padded
 * descriptor (ELF notes pad name and desc to 4-byte boundaries). */
1153 static int notesize(struct memelfnote *en)
1157 sz = sizeof(struct elf_note);
1158 sz += roundup(strlen(en->name) + 1, 4);
1159 sz += roundup(en->datasz, 4);
/* First flavor of the dump helpers: bail out of the enclosing function
 * (return 0) on any short write or failed seek. */
1164 #define DUMP_WRITE(addr, nr) \
1165 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1166 #define DUMP_SEEK(off) \
1167 do { if (!dump_seek(file, (off))) return 0; } while(0)
/* writenote() - emit one ELF note record: header, name, pad to 4 bytes,
 * descriptor data, pad again.  Returns 0 on failure (via the macros). */
1169 static int writenote(struct memelfnote *men, struct file *file)
1173 en.n_namesz = strlen(men->name) + 1;
1174 en.n_descsz = men->datasz;
1175 en.n_type = men->type;
1177 DUMP_WRITE(&en, sizeof(en));
1178 DUMP_WRITE(men->name, en.n_namesz);
1179 /* XXX - cast from long long to long to avoid need for libgcc.a */
1180 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1181 DUMP_WRITE(men->data, men->datasz);
1182 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
/* Second flavor of the dump macros, redefined for elf_core_dump():
 * additionally enforce the RLIMIT_CORE "limit" via the running "size"
 * counter.  (The #undef of the first flavor is elided in this listing,
 * as are the goto targets in the macro bodies.) */
1189 #define DUMP_WRITE(addr, nr) \
1190 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1192 #define DUMP_SEEK(off) \
1193 if (!dump_seek(file, (off))) \
/* fill_elf_header() - initialize the ET_CORE ELF header for a dump with
 * "segs" program headers (one PT_NOTE + one per dumped VMA). */
1196 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1198 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1199 elf->e_ident[EI_CLASS] = ELF_CLASS;
1200 elf->e_ident[EI_DATA] = ELF_DATA;
1201 elf->e_ident[EI_VERSION] = EV_CURRENT;
1202 elf->e_ident[EI_OSABI] = ELF_OSABI;
1203 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1205 elf->e_type = ET_CORE;
1206 elf->e_machine = ELF_ARCH;
1207 elf->e_version = EV_CURRENT;
/* Phdrs immediately follow the ELF header; no section table in a core. */
1209 elf->e_phoff = sizeof(struct elfhdr);
1212 elf->e_ehsize = sizeof(struct elfhdr);
1213 elf->e_phentsize = sizeof(struct elf_phdr);
1214 elf->e_phnum = segs;
1215 elf->e_shentsize = 0;
1217 elf->e_shstrndx = 0;
/* fill_elf_note_phdr() - describe the PT_NOTE area: sz bytes of notes
 * starting at the given file offset (p_vaddr/p_memsz etc. are zeroed in
 * lines elided from this sampled listing). */
1221 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1223 phdr->p_type = PT_NOTE;
1224 phdr->p_offset = offset;
1227 phdr->p_filesz = sz;
/* fill_note() - populate an in-memory note record; the body (assigning
 * name/type/datasz/data) is entirely elided from this sampled listing. */
1234 static void fill_note(struct memelfnote *note, const char *name, int type,
1235 unsigned int sz, void *data)
1245 * fill up all the fields in prstatus from the given task struct, except registers
1246 * which need to be filled up separately.
/* Copies signal state, identifiers (pid/ppid/pgrp/sid) and the four
 * jiffies-based CPU-time fields into the NT_PRSTATUS payload. */
1248 static void fill_prstatus(struct elf_prstatus *prstatus,
1249 struct task_struct *p, long signr)
1251 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1252 prstatus->pr_sigpend = p->pending.signal.sig[0];
1253 prstatus->pr_sighold = p->blocked.sig[0];
1254 prstatus->pr_pid = p->pid;
1255 prstatus->pr_ppid = p->parent->pid;
1256 prstatus->pr_pgrp = process_group(p);
1257 prstatus->pr_sid = p->signal->session;
1258 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1259 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1260 jiffies_to_timeval(p->cutime, &prstatus->pr_cutime);
1261 jiffies_to_timeval(p->cstime, &prstatus->pr_cstime);
/* fill_psinfo() - populate the NT_PRPSINFO payload: command line (read
 * back from user memory, NULs turned into spaces), ids, scheduling state
 * letter, nice value and comm.
 * NOTE(review): the copy_from_user() return value is ignored in the
 * original; mm is assumed non-NULL here. */
1264 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1265 struct mm_struct *mm)
1269 /* first copy the parameters from user space */
1270 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1272 len = mm->arg_end - mm->arg_start;
1273 if (len >= ELF_PRARGSZ)
1274 len = ELF_PRARGSZ-1;
1275 copy_from_user(&psinfo->pr_psargs,
1276 (const char __user *)mm->arg_start, len);
1277 for(i = 0; i < len; i++)
1278 if (psinfo->pr_psargs[i] == 0)
1279 psinfo->pr_psargs[i] = ' ';
1280 psinfo->pr_psargs[len] = 0;
1282 psinfo->pr_pid = p->pid;
1283 psinfo->pr_ppid = p->parent->pid;
1284 psinfo->pr_pgrp = process_group(p);
1285 psinfo->pr_sid = p->signal->session;
/* Map task state bitmask to an index into "RSDTZW" (ps-style letter). */
1287 i = p->state ? ffz(~p->state) + 1 : 0;
1288 psinfo->pr_state = i;
1289 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1290 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1291 psinfo->pr_nice = task_nice(p);
1292 psinfo->pr_flag = p->flags;
1293 SET_UID(psinfo->pr_uid, p->uid);
1294 SET_GID(psinfo->pr_gid, p->gid);
1295 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1300 /* Here is the structure in which status of each thread is captured. */
1301 struct elf_thread_status
1303 struct list_head list;
1304 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1305 elf_fpregset_t fpu; /* NT_PRFPREG */
1306 #ifdef ELF_CORE_COPY_XFPREGS
1307 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1309 struct memelfnote notes[3];
1314 * In order to add the specific thread information for the elf file format,
1315 * we need to keep a linked list of every threads pr_status and then
1316 * create a single section for them in the final core file.
1318 static int elf_dump_thread_status(long signr, struct task_struct * p, struct list_head * thread_list)
1321 struct elf_thread_status *t;
1324 t = kmalloc(sizeof(*t), GFP_ATOMIC);
1327 memset(t, 0, sizeof(*t));
1329 INIT_LIST_HEAD(&t->list);
1332 fill_prstatus(&t->prstatus, p, signr);
1333 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1335 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1337 sz += notesize(&t->notes[0]);
1339 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1340 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1342 sz += notesize(&t->notes[1]);
1345 #ifdef ELF_CORE_COPY_XFPREGS
1346 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1347 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1349 sz += notesize(&t->notes[2]);
1352 list_add(&t->list, thread_list);
/*
 * Actual dumper.
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit
 * we just truncate.
 */
1363 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1371 struct vm_area_struct *vma;
1372 struct elfhdr *elf = NULL;
1373 off_t offset = 0, dataoff;
/* RLIMIT_CORE caps how many bytes of segment data we may write */
1374 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1376 struct memelfnote *notes = NULL;
1377 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1378 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1379 struct task_struct *g, *p;
1380 LIST_HEAD(thread_list);
1381 struct list_head *t;
1382 elf_fpregset_t *fpu = NULL;
1383 #ifdef ELF_CORE_COPY_XFPREGS
1384 elf_fpxregset_t *xfpu = NULL;
/* running total of note bytes gathered from the other threads */
1386 int thread_status_size = 0;
1390 * We no longer stop all VM operations.
1392 * This is because those processes that could possibly change map_count or
1393 * the mmap / vma pages are now blocked in do_exit on current finishing
1396 * Only ptrace can touch these memory addresses, but it doesn't change
1397 * the map_count or the pages allocated. So no possibility of crashing
1398 * exists while dumping the mm->vm_next areas to the core file.
1401 /* alloc memory for large data structures: too large to be on stack */
1402 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1405 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1408 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1411 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1414 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1417 #ifdef ELF_CORE_COPY_XFPREGS
1418 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1423 /* capture the status of all other threads */
1425 read_lock(&tasklist_lock);
/* only siblings sharing our mm belong in this core file */
1427 if (current->mm == p->mm && current != p) {
1428 int sz = elf_dump_thread_status(signr, p, &thread_list);
/* sz == 0 means the per-thread record could not be allocated: bail out */
1430 read_unlock(&tasklist_lock);
1433 thread_status_size += sz;
1435 while_each_thread(g,p);
1436 read_unlock(&tasklist_lock);
1439 /* now collect the dump for the current */
1440 memset(prstatus, 0, sizeof(*prstatus));
1441 fill_prstatus(prstatus, current, signr);
1442 elf_core_copy_regs(&prstatus->pr_reg, regs);
/* one PT_LOAD phdr per vma, plus arch extras and one PT_NOTE below */
1444 segs = current->mm->map_count;
1445 #ifdef ELF_CORE_EXTRA_PHDRS
1446 segs += ELF_CORE_EXTRA_PHDRS;
1450 fill_elf_header(elf, segs+1); /* including notes section */
/* flag the task so the rest of the kernel knows a core dump is running */
1453 current->flags |= PF_DUMPCORE;
1456 * Set up the notes in similar form to SVR4 core dumps made
1457 * with info from their /proc.
1460 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
/* psinfo describes the whole process, so use the group leader */
1462 fill_psinfo(psinfo, current->group_leader, current->mm);
1463 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1465 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
/* count auxv entries up to and including the AT_NULL terminator */
1469 auxv = (elf_addr_t *) current->mm->saved_auxv;
1474 while (auxv[i - 2] != AT_NULL);
/* NOTE(review): "¬es" below is mojibake for "&notes" — restore when fixing this listing */
1475 fill_note(¬es[numnote++], "CORE", NT_AUXV,
1476 i * sizeof (elf_addr_t), auxv);
1478 /* Try to dump the FPU. */
1479 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1480 fill_note(notes + numnote++,
1481 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1482 #ifdef ELF_CORE_COPY_XFPREGS
1483 if (elf_core_copy_task_xfpregs(current, xfpu))
1484 fill_note(notes + numnote++,
1485 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
/* Pass 2: write the ELF header, then track offsets for everything that follows */
1491 DUMP_WRITE(elf, sizeof(*elf));
1492 offset += sizeof(*elf); /* Elf header */
1493 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1495 /* Write notes phdr entry */
1497 struct elf_phdr phdr;
/* total note size = our notes plus all per-thread notes */
1500 for (i = 0; i < numnote; i++)
1501 sz += notesize(notes + i);
1503 sz += thread_status_size;
1505 fill_elf_note_phdr(&phdr, sz, offset);
1507 DUMP_WRITE(&phdr, sizeof(phdr));
1510 /* Page-align dumped data */
1511 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1513 /* Write program headers for segments dump */
1514 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1515 struct elf_phdr phdr;
1518 sz = vma->vm_end - vma->vm_start;
1520 phdr.p_type = PT_LOAD;
1521 phdr.p_offset = offset;
1522 phdr.p_vaddr = vma->vm_start;
/* maydump() decides policy; undumped vmas get a zero-length file image */
1524 phdr.p_filesz = maydump(vma) ? sz : 0;
1526 offset += phdr.p_filesz;
/* translate VM_* protection bits into ELF segment flags */
1527 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1528 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1529 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1530 phdr.p_align = ELF_EXEC_PAGESIZE;
1532 DUMP_WRITE(&phdr, sizeof(phdr));
1535 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1536 ELF_CORE_WRITE_EXTRA_PHDRS;
1539 /* write out the notes section */
1540 for (i = 0; i < numnote; i++)
1541 if (!writenote(notes + i, file))
1544 /* write out the thread status notes section */
1545 list_for_each(t, &thread_list) {
1546 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1547 for (i = 0; i < tmp->num_notes; i++)
1548 if (!writenote(&tmp->notes[i], file))
/* finally dump the contents of each vma, one page at a time */
1554 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1560 for (addr = vma->vm_start;
1562 addr += PAGE_SIZE) {
/* inner vma shadows the outer loop variable: it receives the vma
 * get_user_pages() resolved for this particular address */
1564 struct vm_area_struct *vma;
1566 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1567 &page, &vma) <= 0) {
/* could not pin the page: seek past it, leaving a hole in the file */
1568 DUMP_SEEK (file->f_pos + PAGE_SIZE);
/* shared zero pages also become holes rather than stored zeroes */
1570 if (page == ZERO_PAGE(addr)) {
1571 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1574 flush_cache_page(vma, addr);
/* stop dumping data once RLIMIT_CORE is exceeded or a write fails */
1576 if ((size += PAGE_SIZE) > limit ||
1577 !dump_write(file, kaddr,
1580 page_cache_release(page);
1585 page_cache_release(page);
1590 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1591 ELF_CORE_WRITE_EXTRA_DATA;
/* sanity check: what we wrote must end exactly at the offset pass 1 computed */
1594 if ((off_t) file->f_pos != offset) {
1596 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1597 (off_t) file->f_pos, offset);
/* cleanup: free the per-thread status records collected in pass 1 */
1604 while(!list_empty(&thread_list)) {
1605 struct list_head *tmp = thread_list.next;
1607 kfree(list_entry(tmp, struct elf_thread_status, list));
1615 #ifdef ELF_CORE_COPY_XFPREGS
1622 #endif /* USE_ELF_CORE_DUMP */
1624 static int __init init_elf_binfmt(void)
1626 return register_binfmt(&elf_format);
1629 static void __exit exit_elf_binfmt(void)
1631 /* Remove the COFF and ELF loaders. */
1632 unregister_binfmt(&elf_format);
/* core_initcall: run early in boot, before regular module initcalls */
1635 core_initcall(init_elf_binfmt);
/* pairs with init_elf_binfmt when built as a module */
1636 module_exit(exit_elf_binfmt);
1637 MODULE_LICENSE("GPL");