/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/ckrm.h>
#include <linux/vs_memory.h>
#include <linux/ckrm_mem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <linux/kmod.h>
char core_pattern[65] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);
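/*
 * Illustrative sketch (not part of this file): a binary-format handler
 * module typically fills in a struct linux_binfmt and registers it from
 * its init routine; the names below are hypothetical.
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		return register_binfmt(&example_format);
 *	}
 */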
int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	nd.intent.open.flags = FMODE_READ;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	/* ... */
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	/* ... */
	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	/* ... */
	{
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	/* ... */
exit:
	/* ... */
	return error;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;
	/* ... */
	if (get_user(p, argv))
		return -EFAULT;
	/* ... */
	return i;
}
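/*
 * Note: argv follows the usual exec convention of a NULL-terminated
 * array of user-space pointers; the elided loop above walks it with
 * get_user() until it reads a NULL entry or i exceeds max.
 */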
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
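/*
 * Sketch of the layout this relies on (inferred from the code below,
 * not a normative diagram): bprm->p starts just under the top of the
 * MAX_ARG_PAGES argument area and moves downward as each string is
 * copied in, so the argument block is built from the top down:
 *
 *	PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *)	<- initial bprm->p
 *	[ strings are copied downward from here ]
 *	page 0 of bprm->page[]				<- lowest address
 */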
int copy_strings(int argc, char __user * __user * argv, struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
			!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}
		/* ... */
		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos / PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();

	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
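/*
 * Usage in this file: do_execve() pushes the program name itself as the
 * deepest string via copy_strings_kernel(1, &bprm->filename, bprm),
 * since bprm->filename lives in kernel memory rather than user space.
 */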
/*
 * This routine is used to map a page into an address space: it is needed
 * by execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte;

	if (unlikely(anon_vma_prepare(vma)))
		goto out_sig;

	flush_dcache_page(page);
	pgd = pgd_offset(mm, address);

	spin_lock(&mm->page_table_lock);
	pmd = pmd_alloc(mm, pgd, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	/* ... */
	lru_cache_add_active(page);
	set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, current);
}
int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = PAGE_SIZE * i - offset;

	/* Limit stack size to 1GB */
	stack_base = current->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;
	stack_base = PAGE_ALIGN(STACK_TOP - stack_base);

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
#ifdef __HAVE_ARCH_ALIGN_STACK
	stack_base = arch_align_stack(STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
#else
	stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
#endif
	mm->arg_start = bprm->p + stack_base;
	arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT) ||
		!vx_vmpages_avail(mm, arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = PAGE_MASK &
			(PAGE_SIZE - 1 + (unsigned long) bprm->p);
#else
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = STACK_TOP;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		// mm->stack_vm = mm->total_vm = vma_pages(mpnt);
		vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
		mm->stack_vm = mm->total_vm;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
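/*
 * Rough picture of the result in the common !CONFIG_STACK_GROWSUP case
 * (illustrative, addresses grow downward):
 *
 *	STACK_TOP					<- mpnt->vm_end
 *	  [ up to MAX_ARG_PAGES of argv/envp strings ]
 *	bprm->p rounded down to a page boundary		<- mpnt->vm_start
 */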
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct file *file;
	int err;

	nd.intent.open.flags = FMODE_READ;
	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	file = ERR_PTR(err);
	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			/* ... */
			file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
			if (!IS_ERR(file)) {
				err = deny_write_access(file);
				/* ... */
			}
			/* ... */
		}
		/* ... */
	}
	return file;
}

EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
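/*
 * Usage sketch (mirrors prepare_binprm() below, shown for clarity):
 * read the first BINPRM_BUF_SIZE bytes of the executable into the
 * binprm buffer for the format handlers to inspect:
 *
 *	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
 *	retval = kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 */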
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Add it to the list of mm's */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &init_mm.mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
#ifdef CONFIG_CKRM_RES_MEM
	if (old_mm) {
		spin_lock(&old_mm->peertask_lock);
		list_del(&tsk->mm_peers);
		ckrm_mem_evaluate_mm(old_mm);
		spin_unlock(&old_mm->peertask_lock);
	}
	spin_lock(&mm->peertask_lock);
	list_add_tail(&tsk->mm_peers, &mm->tasklist);
	ckrm_mem_evaluate_mm(mm);
	spin_unlock(&mm->peertask_lock);
#endif
	if (old_mm) {
		if (active_mm != old_mm) BUG();
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
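/*
 * Background note (not from the original comment): execve() in a
 * multithreaded process must leave exactly one thread running the new
 * image, so besides unsharing the signal table, de_thread() below also
 * kills every other thread in the group and, when called from a
 * non-leader thread, takes over the thread group leader's PID.
 */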
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *newsig, *oldsig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1)
		return 0;

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	spin_lock_init(&newsighand->siglock);
	atomic_set(&newsighand->count, 1);
	memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));

	/*
	 * See if we need to allocate a new signal structure
	 */
	newsig = NULL;
	if (atomic_read(&oldsig->count) > 1) {
		newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
		if (!newsig) {
			kmem_cache_free(sighand_cachep, newsighand);
			return -ENOMEM;
		}
		atomic_set(&newsig->count, 1);
		newsig->group_exit = 0;
		newsig->group_exit_code = 0;
		newsig->group_exit_task = NULL;
		newsig->group_stop_count = 0;
		newsig->curr_target = NULL;
		init_sigpending(&newsig->shared_pending);
		INIT_LIST_HEAD(&newsig->posix_timers);

		newsig->tty = oldsig->tty;
		newsig->pgrp = oldsig->pgrp;
		newsig->session = oldsig->session;
		newsig->leader = oldsig->leader;
		newsig->tty_old_pgrp = oldsig->tty_old_pgrp;
	}

	if (thread_group_empty(current))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (oldsig->group_exit) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		if (newsig)
			kmem_cache_free(signal_cachep, newsig);
		return -EAGAIN;
	}
	oldsig->group_exit = 1;
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (current->pid == current->tgid)
		count = 1;
	while (atomic_read(&oldsig->count) > count) {
		oldsig->group_exit_task = current;
		oldsig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (current->pid != current->tgid) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long exit_state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		if (leader->tgid != current->tgid)
			BUG();
		if (current->pid == current->tgid)
			BUG();
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		exit_state = leader->exit_state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		if (exit_state != EXIT_ZOMBIE)
			BUG();
		release_task(leader);
	}

no_thread_group:

	write_lock_irq(&tasklist_lock);
	spin_lock(&oldsighand->siglock);
	spin_lock(&newsighand->siglock);

	if (current == oldsig->curr_target)
		oldsig->curr_target = next_thread(current);
	if (newsig)
		current->signal = newsig;
	current->sighand = newsighand;
	init_sigpending(&current->pending);
	recalc_sigpending();

	spin_unlock(&newsighand->siglock);
	spin_unlock(&oldsighand->siglock);
	write_unlock_irq(&tasklist_lock);

	if (newsig && atomic_dec_and_test(&oldsig->count)) {
		exit_itimers(oldsig);
		kmem_cache_free(signal_cachep, oldsig);
	}

	if (atomic_dec_and_test(&oldsighand->count))
		kmem_cache_free(sighand_cachep, oldsighand);

	if (!thread_group_empty(current))
		BUG();
	if (current->tgid != current->pid)
		BUG();
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1)
				sys_close(i);
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	memcpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	steal_locks(files);
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	else
		current->mm->dumpable = suid_dumpable;

	name = bprm->filename;
	for (i=0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0;
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RELOCEXEC;
	flush_thread();

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
		current->mm->dumpable = suid_dumpable;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
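/*
 * Illustrative examples of what lands in bprm->buf (standard magic
 * numbers, not specific to this file): an ELF image begins with
 * "\177ELF", and a script begins with "#!" followed by the interpreter
 * path. The handlers walked by search_binary_handler() key off these
 * first bytes.
 */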
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * vfs_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
static inline int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;

	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
}

EXPORT_SYMBOL(compute_creds);
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
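/*
 * Typical caller (elsewhere in the tree, e.g. the "#!" script handler):
 * after recognizing the interpreter line, it strips the original
 * argv[0] with remove_arg_zero() and copies the interpreter name into
 * its place before rerunning the binfmt search.
 */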
/*
 * cycle through the list of registered binary format handlers until one
 * recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval = 0;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				ckrm_cb_exec(bprm->filename);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
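/*
 * Flow summary of the loop above: pass 1 walks the registered formats;
 * if every handler returns -ENOEXEC, the header bytes look binary, and
 * CONFIG_KMOD is enabled, a "binfmt-%04x" module is requested based on
 * bytes 2-3 of bprm->buf, and pass 2 retries the walk.
 */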
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	file = open_exec(filename);

	retval = PTR_ERR(file);
	if (IS_ERR(file))
		return retval;

	sched_balance_exec();

	retval = -ENOMEM;
	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;
	memset(bprm, 0, sizeof(*bprm));

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	kfree(bprm);

out_ret:
	return retval;
}
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
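/*
 * Example (illustrative, using the specifiers handled below): with
 * core_pattern = "core.%p.%s", a SIGSEGV (signal 11) in a process with
 * tgid 4242 yields the name "core.4242.11".
 */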
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
static void zap_threads(struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
		}
	while_each_thread(g,p);
	read_unlock(&tasklist_lock);
}
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (current->tux_exit)
		current->tux_exit();
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (mm->dumpable == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	current->signal->group_exit = 1;
	current->signal->group_exit_code = exit_code;
	coredump_wait(mm);

	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;

	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	current->fsuid = fsuid;
	complete_all(&mm->core_done);
fail:
	return retval;
}