/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/vs_memory.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <linux/kmod.h>
int core_uses_pid;
char core_pattern[65] = "core";
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	write_unlock(&binfmt_lock);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(register_binfmt);
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	write_unlock(&binfmt_lock);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
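/*
 * Illustrative sketch, not part of the original file: how a
 * binary-format module typically plugs into the list above.  The
 * handler name and the two magic bytes are made up; register_binfmt(),
 * unregister_binfmt() and struct linux_binfmt are the real interfaces.
 */
#if 0	/* example only */
static int example_load_binary(struct linux_binprm *bprm,
			       struct pt_regs *regs)
{
	/* Not our (hypothetical) magic: let the next handler try. */
	if (bprm->buf[0] != 'X' || bprm->buf[1] != 'B')
		return -ENOEXEC;
	/* ... set up the new executable image here ... */
	return 0;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,
};

static int __init example_init(void)
{
	return register_binfmt(&example_format);
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_init);
module_exit(example_exit);
#endif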
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	nd.intent.open.flags = FMODE_READ;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);

	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	struct linux_binfmt * fmt;

	read_lock(&binfmt_lock);
	for (fmt = formats ; fmt ; fmt = fmt->next) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
			if (get_user(p, argv))
				return -EFAULT;
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;

		if (get_user(str, argv+argc) ||
		    !(len = strnlen_user(str, bprm->p))) {

		/* XXX: add architecture specific overflow check here. */

			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;

			page = bprm->page[i];
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				memset(kaddr+offset+len, 0,
				       PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;

	kunmap(kmapped_page);
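/*
 * Rough sketch (not in the original source) of what copy_strings()
 * builds: strings are packed downward from the top of the
 * MAX_ARG_PAGES temporary pages, and bprm->p is left pointing at the
 * lowest byte written.  E.g. for argv = {"ls", "-l"}, envp = {"HOME=/"}:
 *
 *	page[MAX_ARG_PAGES-1]:	... 'l' 's' 0 '-' 'l' 0 'H' 'O' 'M' 'E' '=' '/' 0 |top
 *				    ^
 *				    bprm->p
 *
 * setup_arg_pages() below installs these same pages at their final
 * stack address, so the strings are never copied a second time.
 */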
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t * pgd;
	pud_t * pud;
	pmd_t * pmd;
	pte_t * pte;

	if (unlikely(anon_vma_prepare(vma)))
		goto out_sig;

	flush_dcache_page(page);
	pgd = pgd_offset(mm, address);

	spin_lock(&mm->page_table_lock);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	inc_mm_counter(mm, rss);
	lru_cache_add_active(page);
	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, current);
}
#define EXTRA_STACK_VM_PAGES	20	/* random */

int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;
#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */

	/* Start by shifting all the pages down */
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (page)
			bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(stack_top - stack_base);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = stack_base + PAGE_SIZE * i - offset;

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT) ||
	    !vx_vmpages_avail(mm, arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
	}

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
#ifdef CONFIG_STACK_GROWSUP
	mpnt->vm_start = stack_base;
	mpnt->vm_end = stack_base + arg_size;
#else
	mpnt->vm_end = stack_top;
	mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
	/* Adjust stack execute permissions; explicitly enable
	 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
	 * and leave alone (arch default) otherwise. */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
	else
		mpnt->vm_flags = VM_STACK_FLAGS;
	mpnt->vm_flags |= mm->def_flags;
	mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
	if ((ret = insert_vm_struct(mm, mpnt))) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, mpnt);
	}
	vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
	mm->stack_vm = mm->total_vm;

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

EXPORT_SYMBOL(setup_arg_pages);
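/*
 * Sketch (not in the original source) of the grows-down layout this
 * function creates; arg_size covers the argument pages plus the
 * EXTRA_STACK_VM_PAGES slack added above:
 *
 *	stack_top  ->	+------------------------+  <- mpnt->vm_end
 *			| argv/envp string pages |  (installed by
 *			| (up to MAX_ARG_PAGES)  |   install_arg_page())
 *	bprm->p    ->	+------------------------+
 *			| room for stack growth  |
 *			+------------------------+  <- mpnt->vm_start
 *						       = vm_end - arg_size
 */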
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	nd.intent.open.flags = FMODE_READ;
	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;

			file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
			err = deny_write_access(file);
		}
	}
	return file;
}

EXPORT_SYMBOL(open_exec);
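/*
 * Typical calling pattern (sketch, mirroring do_execve() below): the
 * return value must be tested with IS_ERR()/PTR_ERR(), and the
 * deny_write_access() taken here is dropped again with
 * allow_write_access() before the file is released.
 */
#if 0	/* example only */
	struct file *file = open_exec("/bin/true");
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... read headers, map segments ... */
	allow_write_access(file);
	fput(file);
#endif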
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
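/*
 * Sketch of typical use (not from the original file): a binfmt handler
 * pulling data beyond the BINPRM_BUF_SIZE header that prepare_binprm()
 * fills in below; "offset" and the fixed-size buffer are assumptions.
 */
#if 0	/* example only */
	char buf[128];
	int n = kernel_read(bprm->file, offset, buf, sizeof(buf));
	if (n != sizeof(buf))
		return n < 0 ? n : -EIO;	/* short read or error */
#endif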
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	/*
	 * Make sure that if there is a core dump in progress
	 * for the old mm, we get out and die instead of going
	 * through with the exec. We must hold mmap_sem around
	 * checking core_waiters and changing tsk->mm. The
	 * core-inducing thread will increment core_waiters for
	 * each thread whose ->mm == old_mm.
	 */
	down_read(&old_mm->mmap_sem);
	if (unlikely(old_mm->core_waiters)) {
		up_read(&old_mm->mmap_sem);
		return -EINTR;
	}

	active_mm = tsk->active_mm;
	activate_mm(active_mm, mm);
	arch_pick_mmap_layout(mm);

	up_read(&old_mm->mmap_sem);
	BUG_ON(active_mm != old_mm);
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(current))
		goto no_thread_group;
	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (thread_group_leader(current))
		count = 1;
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = current;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	sig->real_timer.data = (unsigned long)current;
	spin_unlock_irq(lock);
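/*
 * Worked example of the accounting above (illustrative): in a group of
 * three threads, sig->count starts at 3.  An exec by a non-leader waits
 * with count == 2 until only itself and the zombie group leader remain;
 * an exec by the leader waits for count == 1, i.e. until everyone else
 * is gone.
 */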
	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(current)) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long exit_state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		if (leader->tgid != current->tgid)
			BUG();
		if (current->pid == current->tgid)
			BUG();
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;
		if (unlikely(ptrace) && unlikely(parent == current)) {
			/*
			 * Joker was ptracing his own group leader,
			 * and now he wants to be his own parent!
			 * We can't have that.
			 */
			ptrace = 0;
		}

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		exit_state = leader->exit_state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		if (exit_state != EXIT_ZOMBIE)
			BUG();
		release_task(leader);
	}
	/*
	 * Now there are really no other threads at all,
	 * so it's safe to stop telling them to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	BUG_ON(atomic_read(&sig->count) != 1);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		spin_lock_init(&newsighand->siglock);
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock(&newsighand->siglock);

		current->sighand = newsighand;

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
	}
	BUG_ON(!thread_group_empty(current));
	BUG_ON(!thread_group_leader(current));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	name = bprm->filename;

	/* Copies the binary name from after the last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		current->mm->dumpable = 0;
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	int retval;
	struct inode * inode = bprm->file->f_dentry->d_inode;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * generic_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
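/*
 * Worked example of the setgid test above (illustrative): mode 02755
 * (S_ISGID plus group execute) makes the file a real setgid executable
 * and raises bprm->e_gid, while mode 02644 (S_ISGID without S_IXGRP)
 * is the traditional marker for mandatory locking and leaves the
 * credentials alone.
 */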
static inline int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;

	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid)
		suid_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle the list of binary format handlers, until one recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
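/*
 * Example of hooking the request_module() call above (an assumption,
 * not from this file): a modular handler can make itself demand-
 * loadable for a given magic, here the little-endian short formed by
 * bytes 2-3 of an ELF header ('L', 'F' -> 0x464c).
 */
#if 0	/* example only */
MODULE_ALIAS("binfmt-464c");
#endif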
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
	memset(bprm, 0, sizeof(*bprm));

	file = open_exec(filename);
	retval = PTR_ERR(file);

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();

	retval = init_new_context(current, bprm->mm);

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);

	retval = prepare_binprm(bprm);

	retval = copy_strings_kernel(1, &bprm->filename, bprm);

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);

	retval = copy_strings(bprm->argc, argv, bprm);

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		update_mem_hiwater(current);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	kfree(bprm);
	return retval;
}
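/*
 * Sketch (modeled on the i386 wrapper of this era, simplified): how an
 * architecture's sys_execve feeds its register frame into do_execve().
 */
#if 0	/* example only */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename,
			  (char __user * __user *) regs.ecx,
			  (char __user * __user *) regs.edx, &regs);
	putname(filename);
	return error;
}
#endif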
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;
	/* Repeat as long as we have more pattern to process
	 * and more output space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
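/*
 * Worked example (illustrative): with core_pattern = "core.%e.%p.%t",
 * a crash of "myapp" with tgid 4242 at UNIX time 1100000000 is dumped
 * to "core.myapp.4242.1100000000".  With the default pattern "core"
 * and core_uses_pid set, the fallback above appends ".4242" instead.
 */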
static void zap_threads(struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;
	int traced = 0;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
			if (unlikely(p->ptrace) &&
			    unlikely(p->parent->mm == mm))
				traced = 1;
		}
	while_each_thread(g,p);
	read_unlock(&tasklist_lock);

	if (unlikely(traced)) {
		/*
		 * We are zapping a thread and the thread it ptraces.
		 * If the tracee went into a ptrace stop for exit tracing,
		 * we could deadlock since the tracer is waiting for this
		 * coredump to finish. Detach them so they can both die.
		 */
		write_lock_irq(&tasklist_lock);
		do_each_thread(g,p) {
			if (mm == p->mm && p != tsk &&
			    p->ptrace && p->parent->mm == mm) {
				__ptrace_unlink(p);
			}
		} while_each_thread(g,p);
		write_unlock_irq(&tasklist_lock);
	}
}
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	spin_lock_irq(&current->sighand->siglock);
	current->signal->flags = SIGNAL_GROUP_EXIT;
	current->signal->group_exit_code = exit_code;
	spin_unlock_irq(&current->sighand->siglock);
	coredump_wait(mm);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	current->signal->group_stop_count = 0;
	clear_thread_flag(TIF_SIGPENDING);

	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	file = filp_open(corename, O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	complete_all(&mm->core_done);
fail:
	return retval;
}
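/*
 * Note (summary, not in the original comments): the 0x80 merged into
 * group_exit_code above is the bit that wait(2) callers observe via
 * WCOREDUMP() as "core dumped".
 */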