2 * linux/fs/binfmt_elf.c
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
12 #include <linux/module.h>
13 #include <linux/kernel.h>
15 #include <linux/stat.h>
16 #include <linux/time.h>
18 #include <linux/mman.h>
19 #include <linux/a.out.h>
20 #include <linux/errno.h>
21 #include <linux/signal.h>
22 #include <linux/binfmts.h>
23 #include <linux/string.h>
24 #include <linux/file.h>
25 #include <linux/fcntl.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/shm.h>
29 #include <linux/personality.h>
30 #include <linux/elfcore.h>
31 #include <linux/init.h>
32 #include <linux/highuid.h>
33 #include <linux/smp.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compiler.h>
36 #include <linux/highmem.h>
37 #include <linux/pagemap.h>
38 #include <linux/security.h>
39 #include <linux/syscalls.h>
40 #include <linux/vs_memory.h>
42 #include <asm/uaccess.h>
43 #include <asm/param.h>
46 #include <linux/elf.h>
/* Forward declarations for the binfmt handler entry points below. */
48 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
49 static int load_elf_library(struct file*);
50 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
51 extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
/* Auxiliary-vector entries are stored as native unsigned longs. */
54 #define elf_addr_t unsigned long
58 * If we don't support core dumping, then supply a NULL so we
61 #ifdef USE_ELF_CORE_DUMP
62 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
64 #define elf_core_dump NULL
/*
 * ELF_MIN_ALIGN is the granularity used for mapping ELF segments: the
 * larger of the architecture's ELF_EXEC_PAGESIZE and the MMU PAGE_SIZE.
 */
67 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
68 # define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
70 # define ELF_MIN_ALIGN PAGE_SIZE
/* Round down / offset within / round up to an ELF_MIN_ALIGN boundary. */
73 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
74 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
75 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/*
 * Registration record for this binary format: hooks for loading ELF
 * executables, ELF shared libraries, and (if enabled) core dumping.
 */
77 static struct linux_binfmt elf_format = {
78 .module = THIS_MODULE,
79 .load_binary = load_elf_binary,
80 .load_shlib = load_elf_library,
81 .core_dump = elf_core_dump,
82 .min_coredump = ELF_EXEC_PAGESIZE
/* An address is "bad" if it lies beyond the user address space. */
85 #define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
/*
 * set_brk - extend the process break to cover [start, end)
 *
 * Both bounds are rounded up to ELF_MIN_ALIGN; do_brk() maps the
 * anonymous pages and the mm's brk pointers are advanced to 'end'.
 */
87 static int set_brk(unsigned long start, unsigned long end)
89 start = ELF_PAGEALIGN(start);
90 end = ELF_PAGEALIGN(end);
/* do_brk() return is the mapped address; NOTE(review): error check
   presumably follows — not visible in this view. */
92 unsigned long addr = do_brk(start, end - start);
96 current->mm->start_brk = current->mm->brk = end;
101 /* We need to explicitly zero any fractional pages
102 after the data section (i.e. bss). This would
103 contain the junk from the file that should not
/*
 * padzero - zero the tail of the page containing elf_bss
 * so stale file contents past the data section are not exposed.
 */
107 static void padzero(unsigned long elf_bss)
/* Bytes already used in the final page; if non-zero, clear the rest. */
111 nbyte = ELF_PAGEOFFSET(elf_bss);
113 nbyte = ELF_MIN_ALIGN - nbyte;
114 clear_user((void __user *) elf_bss, nbyte);
118 /* Let's use some macros to make this stack manipulation a little clearer */
/* On upward-growing stacks items are added above sp; otherwise below. */
119 #ifdef CONFIG_STACK_GROWSUP
120 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
/* Round the stack pointer to a 16-byte boundary (ABI alignment). */
121 #define STACK_ROUND(sp, items) \
122 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
123 #define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
125 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
126 #define STACK_ROUND(sp, items) \
127 (((unsigned long) (sp - items)) &~ 15UL)
128 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
/*
 * create_elf_tables - lay out argc/argv/envp and the ELF auxiliary
 * vector on the new process stack.
 *
 * @bprm:             binary parameter block (stack pointer, arg/env counts)
 * @exec:             ELF header of the executable being loaded
 * @interp_aout:      nonzero when the interpreter is an a.out image
 * @load_addr:        load address of the executable's program headers
 * @interp_load_addr: load address of the interpreter (AT_BASE)
 */
132 create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
133 int interp_aout, unsigned long load_addr,
134 unsigned long interp_load_addr)
136 unsigned long p = bprm->p;
137 int argc = bprm->argc;
138 int envc = bprm->envc;
139 elf_addr_t __user *argv;
140 elf_addr_t __user *envp;
141 elf_addr_t __user *sp;
142 elf_addr_t __user *u_platform;
143 const char *k_platform = ELF_PLATFORM;
145 elf_addr_t *elf_info;
147 struct task_struct *tsk = current;
150 * If this architecture has a platform capability string, copy it
151 * to userspace. In some cases (Sparc), this info is impossible
152 * for userspace to get any other way, in others (i386) it is
158 size_t len = strlen(k_platform) + 1;
162 * In some cases (e.g. Hyper-Threading), we want to avoid L1
163 * evictions by the processes running on the same package. One
164 * thing we can do is to shuffle the initial stack for them.
166 * The conditionals here are unneeded, but kept in to make the
167 * code behaviour the same as pre change unless we have
168 * hyperthreaded processors. This should be cleaned up
/* Per-pid stack offset (multiples of 128 bytes) to spread cache sets. */
172 if (smp_num_siblings > 1)
173 STACK_ALLOC(p, ((current->pid % 64) << 7));
175 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
176 __copy_to_user(u_platform, k_platform, len);
179 /* Create the ELF interpreter info */
/* The auxv is built in the mm's saved_auxv buffer, then copied out. */
180 elf_info = (elf_addr_t *) current->mm->saved_auxv;
181 #define NEW_AUX_ENT(id, val) \
182 do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
186 * ARCH_DLINFO must come first so PPC can do its special alignment of
191 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
192 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
193 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
194 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
195 NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
196 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
197 NEW_AUX_ENT(AT_BASE, interp_load_addr);
198 NEW_AUX_ENT(AT_FLAGS, 0);
199 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
200 NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
201 NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
202 NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
203 NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
204 NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
206 NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
208 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
209 NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
212 /* AT_NULL is zero; clear the rest too */
213 memset(&elf_info[ei_index], 0,
214 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
216 /* And advance past the AT_NULL entry. */
219 sp = STACK_ADD(p, ei_index);
/* argc slot + argv pointers + NULL + envp pointers + NULL. */
221 items = (argc + 1) + (envc + 1);
223 items += 3; /* a.out interpreters require argv & envp too */
225 items += 1; /* ELF interpreters only put argc on the stack */
227 bprm->p = STACK_ROUND(sp, items);
229 /* Point sp at the lowest address on the stack */
230 #ifdef CONFIG_STACK_GROWSUP
231 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
232 bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
234 sp = (elf_addr_t __user *)bprm->p;
237 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
238 __put_user(argc, sp++);
/* a.out interpreters get explicit argv/envp pointers after argc. */
241 envp = argv + argc + 1;
242 __put_user((elf_addr_t)(unsigned long)argv, sp++);
243 __put_user((elf_addr_t)(unsigned long)envp, sp++);
246 envp = argv + argc + 1;
249 /* Populate argv and envp */
/* Argument strings were already copied to the stack; record pointers. */
250 p = current->mm->arg_start;
253 __put_user((elf_addr_t)p, argv++);
254 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
255 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
260 current->mm->arg_end = current->mm->env_start = p;
263 __put_user((elf_addr_t)p, envp++);
264 len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
265 if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
270 current->mm->env_end = p;
272 /* Put the elf_info on the stack in the right place. */
273 sp = (elf_addr_t __user *)envp + 1;
274 copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t));
/*
 * elf_map - mmap one PT_LOAD segment of an ELF file.
 *
 * The requested virtual address and file offset are rounded down to
 * ELF_MIN_ALIGN; the length is grown by the same page offset so the
 * mapping covers the segment's file contents exactly.
 * Returns the address chosen by do_mmap() (may differ from 'addr'
 * unless MAP_FIXED was requested in 'type').
 */
279 static unsigned long elf_map(struct file *filep, unsigned long addr,
280 struct elf_phdr *eppnt, int prot, int type)
282 unsigned long map_addr;
284 down_write(&current->mm->mmap_sem);
285 map_addr = do_mmap(filep, ELF_PAGESTART(addr),
286 eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
287 eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
288 up_write(&current->mm->mmap_sem);
292 #endif /* !elf_map */
294 /* This is much more generalized than the library routine read function,
295 so we keep this separate. Technically the library read function
296 is only provided so that we can read a.out libraries that have
/*
 * load_elf_interp - map an ELF program interpreter (dynamic linker)
 * into the current address space.
 *
 * On success returns the interpreter's entry point (relocated by its
 * load address) and stores the load address in *interp_load_addr;
 * on failure returns an error value for which BAD_ADDR() is true.
 */
299 static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
300 struct file * interpreter,
301 unsigned long *interp_load_addr)
303 struct elf_phdr *elf_phdata;
304 struct elf_phdr *eppnt;
305 unsigned long load_addr = 0;
306 int load_addr_set = 0;
307 unsigned long last_bss = 0, elf_bss = 0;
308 unsigned long error = ~0UL;
311 /* First of all, some simple consistency checks */
312 if (interp_elf_ex->e_type != ET_EXEC &&
313 interp_elf_ex->e_type != ET_DYN)
315 if (!elf_check_arch(interp_elf_ex))
317 if (!interpreter->f_op || !interpreter->f_op->mmap)
321 * If the size of this structure has changed, then punt, since
322 * we will be doing the wrong thing.
324 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
/* Cap the header table so the kmalloc below stays bounded. */
326 if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
329 /* Now read in all of the header information */
331 size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
332 if (size > ELF_MIN_ALIGN)
334 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
338 retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
344 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
345 if (eppnt->p_type == PT_LOAD) {
346 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
348 unsigned long vaddr = 0;
349 unsigned long k, map_addr;
/* Translate segment flags into mmap protection bits. */
351 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
352 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
353 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
354 vaddr = eppnt->p_vaddr;
/* ET_EXEC segments are at fixed addresses; once a base is chosen
   for ET_DYN, subsequent segments are fixed relative to it. */
355 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
356 elf_type |= MAP_FIXED;
358 map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
360 if (BAD_ADDR(map_addr))
363 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
364 load_addr = map_addr - ELF_PAGESTART(vaddr);
369 * Check to see if the section's size will overflow the
370 * allowed task size. Note that p_filesz must always be
371 * <= p_memsize so it is only necessary to check p_memsz.
373 k = load_addr + eppnt->p_vaddr;
374 if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
375 eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
381 * Find the end of the file mapping for this phdr, and keep
382 * track of the largest address we see for this.
384 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
389 * Do the same thing for the memory mapping - between
390 * elf_bss and last_bss is the bss section.
392 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
399 * Now fill out the bss section. First pad the last page up
400 * to the page boundary, and then perform a mmap to make sure
401 * that there are zero-mapped pages up to and including the
405 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
407 /* Map the last of the bss segment */
408 if (last_bss > elf_bss) {
409 error = do_brk(elf_bss, last_bss - elf_bss);
414 *interp_load_addr = load_addr;
415 error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
/*
 * load_aout_interp - load an a.out format interpreter by reading its
 * text+data into memory set up with do_brk().
 *
 * Returns the interpreter's a.out entry point, or ~0UL on failure.
 */
423 static unsigned long load_aout_interp(struct exec * interp_ex,
424 struct file * interpreter)
426 unsigned long text_data, elf_entry = ~0UL;
430 current->mm->end_code = interp_ex->a_text;
431 text_data = interp_ex->a_text + interp_ex->a_data;
432 current->mm->end_data = text_data;
433 current->mm->brk = interp_ex->a_bss + text_data;
/* File offset / target address differ per a.out flavour. */
435 switch (N_MAGIC(*interp_ex)) {
438 addr = (char __user *)0;
442 offset = N_TXTOFF(*interp_ex);
443 addr = (char __user *) N_TXTADDR(*interp_ex);
/* Reserve the text+data region, then read the image into it. */
449 do_brk(0, text_data);
450 if (!interpreter->f_op || !interpreter->f_op->read)
452 if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
454 flush_icache_range((unsigned long)addr,
455 (unsigned long)addr + text_data);
/* Extend the break over the bss area following text+data. */
457 do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
459 elf_entry = interp_ex->a_entry;
466 * These are the functions used to load ELF style executables and shared
467 * libraries. There is no binary dependent code anywhere else.
/* Bit flags describing which interpreter format was detected. */
470 #define INTERPRETER_NONE 0
471 #define INTERPRETER_AOUT 1
472 #define INTERPRETER_ELF 2
/*
 * load_elf_binary - the binfmt load_binary hook for ELF executables.
 *
 * Validates the ELF header, reads the program headers, locates and
 * opens an optional PT_INTERP interpreter, flushes the old executable
 * image, maps all PT_LOAD segments, maps the interpreter (ELF or
 * a.out), builds the stack tables and starts the new thread.
 * Returns 0 on success or a negative errno; after the "point of no
 * return" failures kill the process instead.
 */
475 static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
477 struct file *interpreter = NULL; /* to shut gcc up */
478 unsigned long load_addr = 0, load_bias = 0;
479 int load_addr_set = 0;
480 char * elf_interpreter = NULL;
481 unsigned int interpreter_type = INTERPRETER_NONE;
482 unsigned char ibcs2_interpreter = 0;
484 struct elf_phdr * elf_ppnt, *elf_phdata;
485 unsigned long elf_bss, elf_brk;
489 unsigned long elf_entry, interp_load_addr = 0;
490 unsigned long start_code, end_code, start_data, end_data;
491 unsigned long reloc_func_desc = 0;
492 char passed_fileno[6];
493 struct files_struct *files;
494 int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
495 unsigned long def_flags = 0;
497 struct elfhdr elf_ex;
498 struct elfhdr interp_elf_ex;
499 struct exec interp_ex;
502 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
508 /* Get the exec-header */
509 loc->elf_ex = *((struct elfhdr *) bprm->buf);
512 /* First of all, some simple consistency checks */
513 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
516 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
518 if (!elf_check_arch(&loc->elf_ex))
520 if (!bprm->file->f_op||!bprm->file->f_op->mmap)
523 /* Now read in all of the header information */
526 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
/* Bound e_phnum so the program-header allocation stays small. */
528 if (loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
530 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
531 elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
535 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
539 files = current->files; /* Refcounted so ok */
540 retval = unshare_files();
543 if (files == current->files) {
544 put_files_struct(files);
548 /* exec will make our files private anyway, but for the a.out
549 loader stuff we need to do it earlier */
551 retval = get_unused_fd();
554 get_file(bprm->file);
555 fd_install(elf_exec_fileno = retval, bprm->file);
557 elf_ppnt = elf_phdata;
/* Scan the program headers for a PT_INTERP interpreter path. */
566 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
567 if (elf_ppnt->p_type == PT_INTERP) {
568 /* This is the program interpreter used for
569 * shared libraries - for now assume that this
570 * is an a.out format binary
574 if (elf_ppnt->p_filesz > PATH_MAX)
576 elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
578 if (!elf_interpreter)
581 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
585 goto out_free_interp;
586 /* If the program interpreter is one of these two,
587 * then assume an iBCS2 image. Otherwise assume
588 * a native linux image.
590 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
591 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
592 ibcs2_interpreter = 1;
595 * The early SET_PERSONALITY here is so that the lookup
596 * for the interpreter happens in the namespace of the
597 * to-be-execed image. SET_PERSONALITY can select an
600 * However, SET_PERSONALITY is NOT allowed to switch
601 * this task into the new image's memory mapping
602 * policy - that is, TASK_SIZE must still evaluate to
603 * that which is appropriate to the execing application.
604 * This is because exit_mmap() needs to have TASK_SIZE
605 * evaluate to the size of the old image.
607 * So if (say) a 64-bit application is execing a 32-bit
608 * application it is the architecture's responsibility
609 * to defer changing the value of TASK_SIZE until the
610 * switch really is going to happen - do this in
611 * flush_thread(). - akpm
613 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
615 interpreter = open_exec(elf_interpreter);
616 retval = PTR_ERR(interpreter);
617 if (IS_ERR(interpreter))
618 goto out_free_interp;
619 retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
621 goto out_free_dentry;
623 /* Get the exec headers */
/* Read the interpreter's header both as a.out and as ELF; the
   format is decided below from the magic numbers. */
624 loc->interp_ex = *((struct exec *) bprm->buf);
625 loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
/* PT_GNU_STACK, if present, dictates stack executability. */
631 elf_ppnt = elf_phdata;
632 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
633 if (elf_ppnt->p_type == PT_GNU_STACK) {
634 if (elf_ppnt->p_flags & PF_X)
635 executable_stack = EXSTACK_ENABLE_X;
637 executable_stack = EXSTACK_DISABLE_X;
640 have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
642 /* Some simple consistency checks for the interpreter */
643 if (elf_interpreter) {
644 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
646 /* Now figure out which format our binary is */
647 if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
648 (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
649 (N_MAGIC(loc->interp_ex) != QMAGIC))
650 interpreter_type = INTERPRETER_ELF;
652 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
653 interpreter_type &= ~INTERPRETER_ELF;
656 if (!interpreter_type)
657 goto out_free_dentry;
659 /* Make sure only one type was selected */
660 if ((interpreter_type & INTERPRETER_ELF) &&
661 interpreter_type != INTERPRETER_ELF) {
662 // FIXME - ratelimit this before re-enabling
663 // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
664 interpreter_type = INTERPRETER_ELF;
666 /* Verify the interpreter has a valid arch */
667 if ((interpreter_type == INTERPRETER_ELF) &&
668 !elf_check_arch(&loc->interp_elf_ex))
669 goto out_free_dentry;
671 /* Executables without an interpreter also need a personality */
672 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
675 /* OK, we are done with that, now set up the arg stuff,
676 and then start this sucker up */
/* a.out interpreters receive the exec fd number as an extra arg. */
678 if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
679 char *passed_p = passed_fileno;
680 sprintf(passed_fileno, "%d", elf_exec_fileno);
682 if (elf_interpreter) {
683 retval = copy_strings_kernel(1, &passed_p, bprm);
685 goto out_free_dentry;
690 /* Flush all traces of the currently running executable */
691 retval = flush_old_exec(bprm);
693 goto out_free_dentry;
695 /* Discard our unneeded old files struct */
698 put_files_struct(files);
702 /* OK, This is the point of no return */
703 current->mm->start_data = 0;
704 current->mm->end_data = 0;
705 current->mm->end_code = 0;
706 current->mm->mmap = NULL;
707 current->flags &= ~PF_FORKNOEXEC;
708 current->mm->def_flags = def_flags;
710 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
711 may depend on the personality. */
712 SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
713 if (elf_read_implies_exec(loc->elf_ex, have_pt_gnu_stack))
714 current->personality |= READ_IMPLIES_EXEC;
716 arch_pick_mmap_layout(current->mm);
718 /* Do this so that we can load the interpreter, if need be. We will
719 change some of these later */
720 // current->mm->rss = 0;
721 vx_rsspages_sub(current->mm, current->mm->rss);
722 current->mm->free_area_cache = current->mm->mmap_base;
723 retval = setup_arg_pages(bprm, executable_stack);
725 send_sig(SIGKILL, current, 0);
726 goto out_free_dentry;
729 current->mm->start_stack = bprm->p;
731 /* Now we do a little grungy work by mmaping the ELF image into
732 the correct location in memory. At this point, we assume that
733 the image should be loaded at fixed address, not at a variable
736 for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
737 int elf_prot = 0, elf_flags;
738 unsigned long k, vaddr;
740 if (elf_ppnt->p_type != PT_LOAD)
743 if (unlikely (elf_brk > elf_bss)) {
746 /* There was a PT_LOAD segment with p_memsz > p_filesz
747 before this one. Map anonymous pages, if needed,
748 and clear the area. */
749 retval = set_brk (elf_bss + load_bias,
750 elf_brk + load_bias);
752 send_sig(SIGKILL, current, 0);
753 goto out_free_dentry;
755 nbyte = ELF_PAGEOFFSET(elf_bss);
757 nbyte = ELF_MIN_ALIGN - nbyte;
758 if (nbyte > elf_brk - elf_bss)
759 nbyte = elf_brk - elf_bss;
760 clear_user((void __user *) elf_bss + load_bias, nbyte);
764 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
765 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
766 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
768 elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
770 vaddr = elf_ppnt->p_vaddr;
771 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
772 elf_flags |= MAP_FIXED;
773 } else if (loc->elf_ex.e_type == ET_DYN) {
774 /* Try and get dynamic programs out of the way of the default mmap
775 base, as well as whatever program they might try to exec. This
776 is because the brk will follow the loader, and is not movable. */
777 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
780 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
784 if (!load_addr_set) {
786 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
787 if (loc->elf_ex.e_type == ET_DYN) {
789 ELF_PAGESTART(load_bias + vaddr);
790 load_addr += load_bias;
791 reloc_func_desc = load_bias;
/* Track code/data/bss extents across all PT_LOAD segments. */
794 k = elf_ppnt->p_vaddr;
795 if (k < start_code) start_code = k;
796 if (start_data < k) start_data = k;
799 * Check to see if the section's size will overflow the
800 * allowed task size. Note that p_filesz must always be
801 * <= p_memsz so it is only necessary to check p_memsz.
803 if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
804 elf_ppnt->p_memsz > TASK_SIZE ||
805 TASK_SIZE - elf_ppnt->p_memsz < k) {
806 /* set_brk can never work. Avoid overflows. */
807 send_sig(SIGKILL, current, 0);
808 goto out_free_dentry;
811 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
815 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
819 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
/* Relocate all recorded addresses by the chosen load bias. */
824 loc->elf_ex.e_entry += load_bias;
825 elf_bss += load_bias;
826 elf_brk += load_bias;
827 start_code += load_bias;
828 end_code += load_bias;
829 start_data += load_bias;
830 end_data += load_bias;
832 /* Calling set_brk effectively mmaps the pages that we need
833 * for the bss and break sections. We must do this before
834 * mapping in the interpreter, to make sure it doesn't wind
835 * up getting placed where the bss needs to go.
837 retval = set_brk(elf_bss, elf_brk);
839 send_sig(SIGKILL, current, 0);
840 goto out_free_dentry;
844 if (elf_interpreter) {
845 if (interpreter_type == INTERPRETER_AOUT)
846 elf_entry = load_aout_interp(&loc->interp_ex,
849 elf_entry = load_elf_interp(&loc->interp_elf_ex,
852 if (BAD_ADDR(elf_entry)) {
853 printk(KERN_ERR "Unable to load interpreter\n");
854 send_sig(SIGSEGV, current, 0);
855 retval = -ENOEXEC; /* Nobody gets to see this, but.. */
856 goto out_free_dentry;
858 reloc_func_desc = interp_load_addr;
860 allow_write_access(interpreter);
862 kfree(elf_interpreter);
864 elf_entry = loc->elf_ex.e_entry;
869 if (interpreter_type != INTERPRETER_AOUT)
870 sys_close(elf_exec_fileno);
872 set_binfmt(&elf_format);
875 current->flags &= ~PF_FORKNOEXEC;
876 create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
877 load_addr, interp_load_addr);
878 /* N.B. passed_fileno might not be initialized? */
879 if (interpreter_type == INTERPRETER_AOUT)
880 current->mm->arg_start += strlen(passed_fileno) + 1;
881 current->mm->end_code = end_code;
882 current->mm->start_code = start_code;
883 current->mm->start_data = start_data;
884 current->mm->end_data = end_data;
885 current->mm->start_stack = bprm->p;
887 if (current->personality & MMAP_PAGE_ZERO) {
888 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
889 and some applications "depend" upon this behavior.
890 Since we do not have the power to recompile these, we
891 emulate the SVr4 behavior. Sigh. */
892 down_write(&current->mm->mmap_sem);
893 error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
894 MAP_FIXED | MAP_PRIVATE, 0);
895 up_write(&current->mm->mmap_sem);
900 * The ABI may specify that certain registers be set up in special
901 * ways (on i386 %edx is the address of a DT_FINI function, for
902 * example. In addition, it may also specify (eg, PowerPC64 ELF)
903 * that the e_entry field is the address of the function descriptor
904 * for the startup routine, rather than the address of the startup
905 * routine itself. This macro performs whatever initialization to
906 * the regs structure is required as well as any relocations to the
907 * function descriptor entries when executing dynamically links apps.
909 ELF_PLAT_INIT(regs, reloc_func_desc);
912 start_thread(regs, elf_entry, bprm->p);
913 if (unlikely(current->ptrace & PT_PTRACED)) {
914 if (current->ptrace & PT_TRACE_EXEC)
915 ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
917 send_sig(SIGTRAP, current, 0);
/* Error unwinding: release interpreter, fd, and files struct. */
927 allow_write_access(interpreter);
932 kfree(elf_interpreter);
934 sys_close(elf_exec_fileno);
937 put_files_struct(current->files);
938 current->files = files;
945 /* This is really simpleminded and specialized - we are loading an
946 a.out library that is given an ELF header. */
/*
 * load_elf_library - the binfmt load_shlib hook: map a simple ELF
 * shared library (single PT_LOAD, ET_EXEC) into the address space.
 */
948 static int load_elf_library(struct file *file)
950 struct elf_phdr *elf_phdata;
951 unsigned long elf_bss, bss, len;
952 int retval, error, i, j;
953 struct elfhdr elf_ex;
956 retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
957 if (retval != sizeof(elf_ex))
960 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
963 /* First of all, some simple consistency checks */
964 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
965 !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
968 /* Now read in all of the header information */
970 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
971 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
974 elf_phdata = (struct elf_phdr *) kmalloc(j, GFP_KERNEL);
979 retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata, j);
/* Exactly one PT_LOAD segment is expected; count them. */
983 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
984 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
988 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
990 /* Now use mmap to map the library into memory. */
991 down_write(&current->mm->mmap_sem);
992 error = do_mmap(file,
993 ELF_PAGESTART(elf_phdata->p_vaddr),
994 (elf_phdata->p_filesz +
995 ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
996 PROT_READ | PROT_WRITE | PROT_EXEC,
997 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
998 (elf_phdata->p_offset -
999 ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
1000 up_write(&current->mm->mmap_sem);
1001 if (error != ELF_PAGESTART(elf_phdata->p_vaddr))
/* Zero the partial page after the file contents, then map bss. */
1004 elf_bss = elf_phdata->p_vaddr + elf_phdata->p_filesz;
1007 len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr + ELF_MIN_ALIGN - 1);
1008 bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
1010 do_brk(len, bss - len);
1020 * Note that some platforms still use traditional core dumps and not
1021 * the ELF core dump. Each platform can select it as appropriate.
1023 #ifdef USE_ELF_CORE_DUMP
1028 * Modelled on fs/exec.c:aout_core_dump()
1029 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1032 * These are the only things you should do on a core-file: use only these
1033 * functions to write out all the necessary info.
/* Write 'nr' bytes to the core file; nonzero return means success. */
1035 static int dump_write(struct file *file, const void *addr, int nr)
1037 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
/* Seek the core file to 'off', via llseek when the fs provides one. */
1040 static int dump_seek(struct file *file, off_t off)
1042 if (file->f_op->llseek) {
1043 if (file->f_op->llseek(file, off, 0) != off)
1051 * Decide whether a segment is worth dumping; default is yes to be
1052 * sure (missing info is worse than too much; etc).
1053 * Personally I'd include everything, and use the coredump limit...
1055 * I think we should skip something. But I am not sure how. H.J.
1057 static int maydump(struct vm_area_struct *vma)
1060 * If we may not read the contents, don't allow us to dump
1061 * them either. "dump_write()" can't handle it anyway.
1063 if (!(vma->vm_flags & VM_READ))
1066 /* Do not dump I/O mapped devices! -DaveM */
1067 if (vma->vm_flags & VM_IO)
/* Writable or growable mappings are dumped; others depend on flags. */
1070 if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
1072 if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
/* Round x up to the next multiple of y. */
1078 #define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
1080 /* An ELF note in memory */
1085 unsigned int datasz;
/*
 * notesize - total on-disk size of a note: the elf_note header plus
 * the name and data, each padded to a 4-byte boundary.
 */
1089 static int notesize(struct memelfnote *en)
1093 sz = sizeof(struct elf_note);
1094 sz += roundup(strlen(en->name) + 1, 4);
1095 sz += roundup(en->datasz, 4);
/* These variants bail out of the caller (return 0) on I/O failure. */
1100 #define DUMP_WRITE(addr, nr) \
1101 do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
1102 #define DUMP_SEEK(off) \
1103 do { if (!dump_seek(file, (off))) return 0; } while(0)
/*
 * writenote - emit one memelfnote to the core file: header, padded
 * name, then padded payload. Returns 0 on write failure.
 */
1105 static int writenote(struct memelfnote *men, struct file *file)
1109 en.n_namesz = strlen(men->name) + 1;
1110 en.n_descsz = men->datasz;
1111 en.n_type = men->type;
1113 DUMP_WRITE(&en, sizeof(en));
1114 DUMP_WRITE(men->name, en.n_namesz);
1115 /* XXX - cast from long long to long to avoid need for libgcc.a */
1116 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
1117 DUMP_WRITE(men->data, men->datasz);
1118 DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
/* Redefined variants used by elf_core_dump: enforce the coredump
   size limit while accumulating 'size'. */
1125 #define DUMP_WRITE(addr, nr) \
1126 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
1128 #define DUMP_SEEK(off) \
1129 if (!dump_seek(file, (off))) \
/*
 * fill_elf_header - initialize an ET_CORE ELF header for a core dump
 * with 'segs' program headers immediately following it.
 */
1132 static inline void fill_elf_header(struct elfhdr *elf, int segs)
1134 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1135 elf->e_ident[EI_CLASS] = ELF_CLASS;
1136 elf->e_ident[EI_DATA] = ELF_DATA;
1137 elf->e_ident[EI_VERSION] = EV_CURRENT;
1138 elf->e_ident[EI_OSABI] = ELF_OSABI;
1139 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1141 elf->e_type = ET_CORE;
1142 elf->e_machine = ELF_ARCH;
1143 elf->e_version = EV_CURRENT;
1145 elf->e_phoff = sizeof(struct elfhdr);
1148 elf->e_ehsize = sizeof(struct elfhdr);
1149 elf->e_phentsize = sizeof(struct elf_phdr);
1150 elf->e_phnum = segs;
/* Core files carry no section headers. */
1151 elf->e_shentsize = 0;
1153 elf->e_shstrndx = 0;
/* Describe the PT_NOTE segment (size sz at file offset 'offset'). */
1157 static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
1159 phdr->p_type = PT_NOTE;
1160 phdr->p_offset = offset;
1163 phdr->p_filesz = sz;
/* fill_note - record name/type/size/data of one note in memory. */
1170 static void fill_note(struct memelfnote *note, const char *name, int type,
1171 unsigned int sz, void *data)
1181 * fill up all the fields in prstatus from the given task struct, except registers
1182 * which need to be filled up separately.
1184 static void fill_prstatus(struct elf_prstatus *prstatus,
1185 struct task_struct *p, long signr)
1187 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1188 prstatus->pr_sigpend = p->pending.signal.sig[0];
1189 prstatus->pr_sighold = p->blocked.sig[0];
1190 prstatus->pr_pid = p->pid;
1191 prstatus->pr_ppid = p->parent->pid;
1192 prstatus->pr_pgrp = process_group(p);
1193 prstatus->pr_sid = p->signal->session;
1194 if (p->pid == p->tgid) {
1196 * This is the record for the group leader. Add in the
1197 * cumulative times of previous dead threads. This total
1198 * won't include the time of each live thread whose state
1199 * is included in the core dump. The final total reported
1200 * to our parent process when it calls wait4 will include
1201 * those sums as well as the little bit more time it takes
1202 * this and each other thread to finish dying after the
1203 * core dump synchronization phase.
1205 jiffies_to_timeval(p->utime + p->signal->utime,
1206 &prstatus->pr_utime);
1207 jiffies_to_timeval(p->stime + p->signal->stime,
1208 &prstatus->pr_stime);
/* Non-leader threads report only their own CPU time. */
1210 jiffies_to_timeval(p->utime, &prstatus->pr_utime);
1211 jiffies_to_timeval(p->stime, &prstatus->pr_stime);
1213 jiffies_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1214 jiffies_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
/*
 * fill_psinfo - populate the NT_PRPSINFO note: command-line args
 * (copied from the process's own user stack), ids, state and name.
 */
1217 static void fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1218 struct mm_struct *mm)
1222 /* first copy the parameters from user space */
1223 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1225 len = mm->arg_end - mm->arg_start;
1226 if (len >= ELF_PRARGSZ)
1227 len = ELF_PRARGSZ-1;
1228 copy_from_user(&psinfo->pr_psargs,
1229 (const char __user *)mm->arg_start, len);
/* Arguments are NUL-separated on the stack; join them with spaces. */
1230 for(i = 0; i < len; i++)
1231 if (psinfo->pr_psargs[i] == 0)
1232 psinfo->pr_psargs[i] = ' ';
1233 psinfo->pr_psargs[len] = 0;
1235 psinfo->pr_pid = p->pid;
1236 psinfo->pr_ppid = p->parent->pid;
1237 psinfo->pr_pgrp = process_group(p);
1238 psinfo->pr_sid = p->signal->session;
/* Map the task state bitmask to an index into "RSDTZW". */
1240 i = p->state ? ffz(~p->state) + 1 : 0;
1241 psinfo->pr_state = i;
1242 psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
1243 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1244 psinfo->pr_nice = task_nice(p);
1245 psinfo->pr_flag = p->flags;
1246 SET_UID(psinfo->pr_uid, p->uid);
1247 SET_GID(psinfo->pr_gid, p->gid);
1248 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1253 /* Here is the structure in which status of each thread is captured. */
1254 struct elf_thread_status
1256 struct list_head list;
1257 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1258 elf_fpregset_t fpu; /* NT_PRFPREG */
1259 struct task_struct *thread;
1260 #ifdef ELF_CORE_COPY_XFPREGS
1261 elf_fpxregset_t xfpu; /* NT_PRXFPREG */
1263 struct memelfnote notes[3];
1268 * In order to add the specific thread information for the elf file format,
1269 * we need to keep a linked list of every threads pr_status and then
1270 * create a single section for them in the final core file.
1272 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1275 struct task_struct *p = t->thread;
1278 fill_prstatus(&t->prstatus, p, signr);
1279 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1281 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
1283 sz += notesize(&t->notes[0]);
1285 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
1286 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
1288 sz += notesize(&t->notes[1]);
1291 #ifdef ELF_CORE_COPY_XFPREGS
1292 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1293 fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
1295 sz += notesize(&t->notes[2]);
1304 * This is a two-pass process; first we find the offsets of the bits,
1305 * and then they are actually written out. If we run out of core limit
1308 static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
1316 struct vm_area_struct *vma;
1317 struct elfhdr *elf = NULL;
1318 off_t offset = 0, dataoff;
1319 unsigned long limit = current->rlim[RLIMIT_CORE].rlim_cur;
1321 struct memelfnote *notes = NULL;
1322 struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */
1323 struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */
1324 struct task_struct *g, *p;
1325 LIST_HEAD(thread_list);
1326 struct list_head *t;
1327 elf_fpregset_t *fpu = NULL;
1328 #ifdef ELF_CORE_COPY_XFPREGS
1329 elf_fpxregset_t *xfpu = NULL;
1331 int thread_status_size = 0;
1335 * We no longer stop all VM operations.
1337 * This is because those proceses that could possibly change map_count or
1338 * the mmap / vma pages are now blocked in do_exit on current finishing
1341 * Only ptrace can touch these memory addresses, but it doesn't change
1342 * the map_count or the pages allocated. So no possibility of crashing
1343 * exists while dumping the mm->vm_next areas to the core file.
1346 /* alloc memory for large data structures: too large to be on stack */
1347 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
1350 prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
1353 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1356 notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
1359 fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
1362 #ifdef ELF_CORE_COPY_XFPREGS
1363 xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
1369 struct elf_thread_status *tmp;
1370 read_lock(&tasklist_lock);
1372 if (current->mm == p->mm && current != p) {
1373 tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
1375 read_unlock(&tasklist_lock);
1378 memset(tmp, 0, sizeof(*tmp));
1379 INIT_LIST_HEAD(&tmp->list);
1381 list_add(&tmp->list, &thread_list);
1383 while_each_thread(g,p);
1384 read_unlock(&tasklist_lock);
1385 list_for_each(t, &thread_list) {
1386 struct elf_thread_status *tmp;
1389 tmp = list_entry(t, struct elf_thread_status, list);
1390 sz = elf_dump_thread_status(signr, tmp);
1391 thread_status_size += sz;
1394 /* now collect the dump for the current */
1395 memset(prstatus, 0, sizeof(*prstatus));
1396 fill_prstatus(prstatus, current, signr);
1397 elf_core_copy_regs(&prstatus->pr_reg, regs);
1399 segs = current->mm->map_count;
1400 #ifdef ELF_CORE_EXTRA_PHDRS
1401 segs += ELF_CORE_EXTRA_PHDRS;
1405 fill_elf_header(elf, segs+1); /* including notes section */
1408 current->flags |= PF_DUMPCORE;
1411 * Set up the notes in similar form to SVR4 core dumps made
1412 * with info from their /proc.
1415 fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
1417 fill_psinfo(psinfo, current->group_leader, current->mm);
1418 fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1420 fill_note(notes +2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);
1424 auxv = (elf_addr_t *) current->mm->saved_auxv;
1429 while (auxv[i - 2] != AT_NULL);
1430 fill_note(¬es[numnote++], "CORE", NT_AUXV,
1431 i * sizeof (elf_addr_t), auxv);
1433 /* Try to dump the FPU. */
1434 if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
1435 fill_note(notes + numnote++,
1436 "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
1437 #ifdef ELF_CORE_COPY_XFPREGS
1438 if (elf_core_copy_task_xfpregs(current, xfpu))
1439 fill_note(notes + numnote++,
1440 "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
1446 DUMP_WRITE(elf, sizeof(*elf));
1447 offset += sizeof(*elf); /* Elf header */
1448 offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
1450 /* Write notes phdr entry */
1452 struct elf_phdr phdr;
1455 for (i = 0; i < numnote; i++)
1456 sz += notesize(notes + i);
1458 sz += thread_status_size;
1460 fill_elf_note_phdr(&phdr, sz, offset);
1462 DUMP_WRITE(&phdr, sizeof(phdr));
1465 /* Page-align dumped data */
1466 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
1468 /* Write program headers for segments dump */
1469 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1470 struct elf_phdr phdr;
1473 sz = vma->vm_end - vma->vm_start;
1475 phdr.p_type = PT_LOAD;
1476 phdr.p_offset = offset;
1477 phdr.p_vaddr = vma->vm_start;
1479 phdr.p_filesz = maydump(vma) ? sz : 0;
1481 offset += phdr.p_filesz;
1482 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1483 if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
1484 if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
1485 phdr.p_align = ELF_EXEC_PAGESIZE;
1487 DUMP_WRITE(&phdr, sizeof(phdr));
1490 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
1491 ELF_CORE_WRITE_EXTRA_PHDRS;
1494 /* write out the notes section */
1495 for (i = 0; i < numnote; i++)
1496 if (!writenote(notes + i, file))
1499 /* write out the thread status notes section */
1500 list_for_each(t, &thread_list) {
1501 struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
1502 for (i = 0; i < tmp->num_notes; i++)
1503 if (!writenote(&tmp->notes[i], file))
1509 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1515 for (addr = vma->vm_start;
1517 addr += PAGE_SIZE) {
1519 struct vm_area_struct *vma;
1521 if (get_user_pages(current, current->mm, addr, 1, 0, 1,
1522 &page, &vma) <= 0) {
1523 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1525 if (page == ZERO_PAGE(addr)) {
1526 DUMP_SEEK (file->f_pos + PAGE_SIZE);
1529 flush_cache_page(vma, addr);
1531 if ((size += PAGE_SIZE) > limit ||
1532 !dump_write(file, kaddr,
1535 page_cache_release(page);
1540 page_cache_release(page);
1545 #ifdef ELF_CORE_WRITE_EXTRA_DATA
1546 ELF_CORE_WRITE_EXTRA_DATA;
1549 if ((off_t) file->f_pos != offset) {
1551 printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
1552 (off_t) file->f_pos, offset);
1559 while(!list_empty(&thread_list)) {
1560 struct list_head *tmp = thread_list.next;
1562 kfree(list_entry(tmp, struct elf_thread_status, list));
1570 #ifdef ELF_CORE_COPY_XFPREGS
1577 #endif /* USE_ELF_CORE_DUMP */
1579 static int __init init_elf_binfmt(void)
1581 return register_binfmt(&elf_format);
1584 static void __exit exit_elf_binfmt(void)
1586 /* Remove the COFF and ELF loaders. */
1587 unregister_binfmt(&elf_format);
/* Register early in boot (core_initcall), before any initcall may exec. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");