X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fexec.c;h=2624feddc608e5be0c38df2a27a9bae017fb2199;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=121b5a4142418b3ae28cf58be82bd1876c1b7be8;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git diff --git a/fs/exec.c b/fs/exec.c index 121b5a414..2624feddc 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,7 @@ #include #include #include +#include #include #include @@ -60,7 +62,7 @@ char core_pattern[65] = "core"; /* The maximal length of core_pattern is also specified in sysctl.c */ static struct linux_binfmt *formats; -static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED; +static DEFINE_RWLOCK(binfmt_lock); int register_binfmt(struct linux_binfmt * fmt) { @@ -185,6 +187,7 @@ static int count(char __user * __user * argv, int max) argv++; if(++i > max) return -E2BIG; + cond_resched(); } } return i; @@ -300,6 +303,7 @@ void install_arg_page(struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; pgd_t * pgd; + pud_t * pud; pmd_t * pmd; pte_t * pte; @@ -310,7 +314,10 @@ void install_arg_page(struct vm_area_struct *vma, pgd = pgd_offset(mm, address); spin_lock(&mm->page_table_lock); - pmd = pmd_alloc(mm, pgd, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) + goto out; + pmd = pmd_alloc(mm, pud, address); if (!pmd) goto out; pte = pte_alloc_map(mm, pmd, address); @@ -338,12 +345,16 @@ out_sig: force_sig(SIGKILL, current); } -int setup_arg_pages(struct linux_binprm *bprm, int executable_stack) +#define EXTRA_STACK_VM_PAGES 20 /* random */ + +int setup_arg_pages(struct linux_binprm *bprm, + unsigned long stack_top, + int executable_stack) { unsigned long stack_base; struct vm_area_struct *mpnt; struct mm_struct *mm = current->mm; - int i; + int i, ret; long arg_size; #ifdef CONFIG_STACK_GROWSUP @@ -375,14 +386,14 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack) memmove(to, to + offset, PAGE_SIZE - offset); kunmap(bprm->page[j - 1]); - /* Adjust bprm->p to point to the end of the strings. */ - bprm->p = PAGE_SIZE * i - offset; - /* Limit stack size to 1GB */ - stack_base = current->rlim[RLIMIT_STACK].rlim_max; + stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max; if (stack_base > (1 << 30)) stack_base = 1 << 30; - stack_base = PAGE_ALIGN(STACK_TOP - stack_base); + stack_base = PAGE_ALIGN(stack_top - stack_base); + + /* Adjust bprm->p to point to the end of the strings. 
*/ + bprm->p = stack_base + PAGE_SIZE * i - offset; mm->arg_start = stack_base; arg_size = i << PAGE_SHIFT; @@ -391,12 +402,14 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack) while (i < MAX_ARG_PAGES) bprm->page[i++] = NULL; #else - stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE; - mm->arg_start = bprm->p + stack_base; - arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start); + stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE; + bprm->p += stack_base; + mm->arg_start = bprm->p; + arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start); #endif - bprm->p += stack_base; + arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (bprm->loader) bprm->loader += stack_base; bprm->exec += stack_base; @@ -418,11 +431,10 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack) mpnt->vm_mm = mm; #ifdef CONFIG_STACK_GROWSUP mpnt->vm_start = stack_base; - mpnt->vm_end = PAGE_MASK & - (PAGE_SIZE - 1 + (unsigned long) bprm->p); + mpnt->vm_end = stack_base + arg_size; #else - mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; - mpnt->vm_end = STACK_TOP; + mpnt->vm_end = stack_top; + mpnt->vm_start = mpnt->vm_end - arg_size; #endif /* Adjust stack execute permissions; explicitly enable * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X @@ -435,7 +447,11 @@ int setup_arg_pages(struct linux_binprm *bprm, int executable_stack) mpnt->vm_flags = VM_STACK_FLAGS; mpnt->vm_flags |= mm->def_flags; mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7]; - insert_vm_struct(mm, mpnt); + if ((ret = insert_vm_struct(mm, mpnt))) { + up_write(&mm->mmap_sem); + kmem_cache_free(vm_area_cachep, mpnt); + return ret; + } // mm->stack_vm = mm->total_vm = vma_pages(mpnt); vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt)); mm->stack_vm = mm->total_vm; @@ -534,17 +550,26 @@ static int exec_mmap(struct mm_struct *mm) struct task_struct *tsk; struct mm_struct * old_mm, *active_mm; - /* Add it to the list of mm's */ - spin_lock(&mmlist_lock); - list_add(&mm->mmlist, &init_mm.mmlist); - mmlist_nr++; - spin_unlock(&mmlist_lock); - /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; mm_release(tsk, old_mm); + if (old_mm) { + /* + * Make sure that if there is a core dump in progress + * for the old mm, we get out and die instead of going + * through with the exec. We must hold mmap_sem around + * checking core_waiters and changing tsk->mm. The + * core-inducing thread will increment core_waiters for + * each thread whose ->mm == old_mm. + */ + down_read(&old_mm->mmap_sem); + if (unlikely(old_mm->core_waiters)) { + up_read(&old_mm->mmap_sem); + return -EINTR; + } + } task_lock(tsk); active_mm = tsk->active_mm; tsk->mm = mm; @@ -553,6 +578,7 @@ static int exec_mmap(struct mm_struct *mm) task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { + up_read(&old_mm->mmap_sem); if (active_mm != old_mm) BUG(); mmput(old_mm); return 0; @@ -569,7 +595,7 @@ static int exec_mmap(struct mm_struct *mm) */ static inline int de_thread(struct task_struct *tsk) { - struct signal_struct *newsig, *oldsig = tsk->signal; + struct signal_struct *sig = tsk->signal; struct sighand_struct *newsighand, *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; int count; @@ -578,43 +604,16 @@ static inline int de_thread(struct task_struct *tsk) * If we don't share sighandlers, then we aren't sharing anything * and we can just re-use it all. 
*/ - if (atomic_read(&oldsighand->count) <= 1) + if (atomic_read(&oldsighand->count) <= 1) { + BUG_ON(atomic_read(&sig->count) != 1); + exit_itimers(sig); return 0; + } newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); if (!newsighand) return -ENOMEM; - spin_lock_init(&newsighand->siglock); - atomic_set(&newsighand->count, 1); - memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action)); - - /* - * See if we need to allocate a new signal structure - */ - newsig = NULL; - if (atomic_read(&oldsig->count) > 1) { - newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); - if (!newsig) { - kmem_cache_free(sighand_cachep, newsighand); - return -ENOMEM; - } - atomic_set(&newsig->count, 1); - newsig->group_exit = 0; - newsig->group_exit_code = 0; - newsig->group_exit_task = NULL; - newsig->group_stop_count = 0; - newsig->curr_target = NULL; - init_sigpending(&newsig->shared_pending); - INIT_LIST_HEAD(&newsig->posix_timers); - - newsig->tty = oldsig->tty; - newsig->pgrp = oldsig->pgrp; - newsig->session = oldsig->session; - newsig->leader = oldsig->leader; - newsig->tty_old_pgrp = oldsig->tty_old_pgrp; - } - if (thread_group_empty(current)) goto no_thread_group; @@ -624,7 +623,7 @@ static inline int de_thread(struct task_struct *tsk) */ read_lock(&tasklist_lock); spin_lock_irq(lock); - if (oldsig->group_exit) { + if (sig->flags & SIGNAL_GROUP_EXIT) { /* * Another group action in progress, just * return so that the signal is processed. @@ -632,11 +631,8 @@ static inline int de_thread(struct task_struct *tsk) spin_unlock_irq(lock); read_unlock(&tasklist_lock); kmem_cache_free(sighand_cachep, newsighand); - if (newsig) - kmem_cache_free(signal_cachep, newsig); return -EAGAIN; } - oldsig->group_exit = 1; zap_other_threads(current); read_unlock(&tasklist_lock); @@ -644,16 +640,18 @@ static inline int de_thread(struct task_struct *tsk) * Account for the thread group leader hanging around: */ count = 2; - if (current->pid == current->tgid) + if (thread_group_leader(current)) count = 1; - while (atomic_read(&oldsig->count) > count) { - oldsig->group_exit_task = current; - oldsig->notify_count = count; + while (atomic_read(&sig->count) > count) { + sig->group_exit_task = current; + sig->notify_count = count; __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(lock); schedule(); spin_lock_irq(lock); } + sig->group_exit_task = NULL; + sig->notify_count = 0; spin_unlock_irq(lock); /* @@ -661,17 +659,17 @@ static inline int de_thread(struct task_struct *tsk) * do is to wait for the thread group leader to become inactive, * and to assume its PID: */ - if (current->pid != current->tgid) { + if (!thread_group_leader(current)) { struct task_struct *leader = current->group_leader, *parent; struct dentry *proc_dentry1, *proc_dentry2; - unsigned long state, ptrace; + unsigned long exit_state, ptrace; /* * Wait for the thread group leader to be a zombie. * It should already be zombie at this point, most * of the time. */ - while (leader->state != TASK_ZOMBIE) + while (leader->exit_state != EXIT_ZOMBIE) yield(); spin_lock(&leader->proc_lock); @@ -692,6 +690,14 @@ static inline int de_thread(struct task_struct *tsk) */ ptrace = leader->ptrace; parent = leader->parent; + if (unlikely(ptrace) && unlikely(parent == current)) { + /* + * Joker was ptracing his own group leader, + * and now he wants to be his own parent! + * We can't have that. 
+			 */
+			ptrace = 0;
+		}
 		ptrace_unlink(current);
 		ptrace_unlink(leader);
 
@@ -715,7 +721,7 @@ static inline int de_thread(struct task_struct *tsk)
 		list_del(&current->tasks);
 		list_add_tail(&current->tasks, &init_task.tasks);
 		current->exit_signal = SIGCHLD;
-		state = leader->state;
+		exit_state = leader->exit_state;
 
 		write_unlock_irq(&tasklist_lock);
 		spin_unlock(&leader->proc_lock);
@@ -723,40 +729,55 @@ static inline int de_thread(struct task_struct *tsk)
 		proc_pid_flush(proc_dentry1);
 		proc_pid_flush(proc_dentry2);
 
-		if (state != TASK_ZOMBIE)
+		if (exit_state != EXIT_ZOMBIE)
 			BUG();
 		release_task(leader);
 	}
 
+	/*
+	 * Now there are really no other threads at all,
+	 * so it's safe to stop telling them to kill themselves.
+	 */
+	sig->flags = 0;
+
 no_thread_group:
+	BUG_ON(atomic_read(&sig->count) != 1);
+	exit_itimers(sig);
 
-	write_lock_irq(&tasklist_lock);
-	spin_lock(&oldsighand->siglock);
-	spin_lock(&newsighand->siglock);
-
-	if (current == oldsig->curr_target)
-		oldsig->curr_target = next_thread(current);
-	if (newsig)
-		current->signal = newsig;
-	current->sighand = newsighand;
-	init_sigpending(&current->pending);
-	recalc_sigpending();
-
-	spin_unlock(&newsighand->siglock);
-	spin_unlock(&oldsighand->siglock);
-	write_unlock_irq(&tasklist_lock);
-
-	if (newsig && atomic_dec_and_test(&oldsig->count)) {
-		exit_itimers(oldsig);
-		kmem_cache_free(signal_cachep, oldsig);
-	}
+	if (atomic_read(&oldsighand->count) == 1) {
+		/*
+		 * Now that we nuked the rest of the thread group,
+		 * it turns out we are not sharing sighand any more either.
+		 * So we can just keep it.
+		 */
+		kmem_cache_free(sighand_cachep, newsighand);
+	} else {
+		/*
+		 * Move our state over to newsighand and switch it in.
+		 */
+		spin_lock_init(&newsighand->siglock);
+		atomic_set(&newsighand->count, 1);
+		memcpy(newsighand->action, oldsighand->action,
+		       sizeof(newsighand->action));
+
+		write_lock_irq(&tasklist_lock);
+		spin_lock(&oldsighand->siglock);
+		spin_lock(&newsighand->siglock);
 
-	if (atomic_dec_and_test(&oldsighand->count))
-		kmem_cache_free(sighand_cachep, oldsighand);
+		current->sighand = newsighand;
+		recalc_sigpending();
+
+		spin_unlock(&newsighand->siglock);
+		spin_unlock(&oldsighand->siglock);
+		write_unlock_irq(&tasklist_lock);
+
+		if (atomic_dec_and_test(&oldsighand->count))
+			kmem_cache_free(sighand_cachep, oldsighand);
+	}
 
 	if (!thread_group_empty(current))
 		BUG();
-	if (current->tgid != current->pid)
+	if (!thread_group_leader(current))
 		BUG();
 	return 0;
 }
@@ -798,7 +819,7 @@ void get_task_comm(char *buf, struct task_struct *tsk)
 {
 	/* buf must be at least sizeof(tsk->comm) in size */
 	task_lock(tsk);
-	memcpy(buf, tsk->comm, sizeof(tsk->comm));
+	strncpy(buf, tsk->comm, sizeof(tsk->comm));
 	task_unlock(tsk);
 }
 
@@ -865,8 +886,10 @@ int flush_old_exec(struct linux_binprm * bprm)
 
 	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
 	    permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
-	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
+	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+		suid_keys(current);
 		current->mm->dumpable = 0;
+	}
 
 	/* An exec changes our domain.
We are no longer part of the thread group */ @@ -900,7 +923,7 @@ int prepare_binprm(struct linux_binprm *bprm) mode = inode->i_mode; /* * Check execute perms again - if the caller has CAP_DAC_OVERRIDE, - * vfs_permission lets a non-executable through + * generic_permission lets a non-executable through */ if (!(mode & 0111)) /* with at least _one_ execute bit set */ return -EACCES; @@ -960,10 +983,16 @@ static inline int unsafe_exec(struct task_struct *p) void compute_creds(struct linux_binprm *bprm) { int unsafe; + + if (bprm->e_uid != current->uid) + suid_keys(current); + exec_keys(current); + task_lock(current); unsafe = unsafe_exec(current); security_bprm_apply_creds(bprm, unsafe); task_unlock(current); + security_bprm_post_apply_creds(bprm); } EXPORT_SYMBOL(compute_creds); @@ -999,7 +1028,7 @@ EXPORT_SYMBOL(remove_arg_zero); */ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) { - int try,retval=0; + int try,retval; struct linux_binfmt *fmt; #ifdef __alpha__ /* handle /sbin/loader.. */ @@ -1043,6 +1072,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) /* kernel module loader fixup */ /* so we don't try to load run modprobe in kernel space. */ set_fs(USER_DS); + retval = -ENOENT; for (try=0; try<2; try++) { read_lock(&binfmt_lock); for (fmt = formats ; fmt ; fmt = fmt->next) { @@ -1104,26 +1134,26 @@ int do_execve(char * filename, int retval; int i; - file = open_exec(filename); - - retval = PTR_ERR(file); - if (IS_ERR(file)) - return retval; - - sched_exec(); - retval = -ENOMEM; bprm = kmalloc(sizeof(*bprm), GFP_KERNEL); if (!bprm) goto out_ret; memset(bprm, 0, sizeof(*bprm)); + file = open_exec(filename); + retval = PTR_ERR(file); + if (IS_ERR(file)) + goto out_kfree; + + sched_exec(); + bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *); bprm->file = file; bprm->filename = filename; bprm->interp = filename; bprm->mm = mm_alloc(); + retval = -ENOMEM; if (!bprm->mm) goto out_file; @@ -1166,6 +1196,8 @@ int do_execve(char * filename, /* execve success */ security_bprm_free(bprm); + acct_update_integrals(); + update_mem_hiwater(); kfree(bprm); return retval; } @@ -1190,14 +1222,14 @@ out_file: allow_write_access(bprm->file); fput(bprm->file); } + +out_kfree: kfree(bprm); out_ret: return retval; } -EXPORT_SYMBOL(do_execve); - int set_binfmt(struct linux_binfmt *new) { struct linux_binfmt *old = current->binfmt; @@ -1335,6 +1367,7 @@ static void zap_threads (struct mm_struct *mm) struct task_struct *g, *p; struct task_struct *tsk = current; struct completion *vfork_done = tsk->vfork_done; + int traced = 0; /* * Make sure nobody is waiting for us to release the VM, @@ -1350,10 +1383,30 @@ static void zap_threads (struct mm_struct *mm) if (mm == p->mm && p != tsk) { force_sig_specific(SIGKILL, p); mm->core_waiters++; + if (unlikely(p->ptrace) && + unlikely(p->parent->mm == mm)) + traced = 1; } while_each_thread(g,p); read_unlock(&tasklist_lock); + + if (unlikely(traced)) { + /* + * We are zapping a thread and the thread it ptraces. + * If the tracee went into a ptrace stop for exit tracing, + * we could deadlock since the tracer is waiting for this + * coredump to finish. Detach them so they can both die. 
+		 */
+		write_lock_irq(&tasklist_lock);
+		do_each_thread(g,p) {
+			if (mm == p->mm && p != tsk &&
+			    p->ptrace && p->parent->mm == mm) {
+				__ptrace_unlink(p);
+			}
+		} while_each_thread(g,p);
+		write_unlock_irq(&tasklist_lock);
+	}
 }
 
 static void coredump_wait(struct mm_struct *mm)
@@ -1394,11 +1447,20 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	}
 	mm->dumpable = 0;
 	init_completion(&mm->core_done);
-	current->signal->group_exit = 1;
+	spin_lock_irq(&current->sighand->siglock);
+	current->signal->flags = SIGNAL_GROUP_EXIT;
 	current->signal->group_exit_code = exit_code;
+	spin_unlock_irq(&current->sighand->siglock);
 	coredump_wait(mm);
 
-	if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+	/*
+	 * Clear any false indication of pending signals that might
+	 * be seen by the filesystem code called to write the core file.
+	 */
+	current->signal->group_stop_count = 0;
+	clear_thread_flag(TIF_SIGPENDING);
+
+	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
 		goto fail_unlock;
 
 	/*
@@ -1428,7 +1490,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 
 	retval = binfmt->core_dump(signr, regs, file);
 
-	current->signal->group_exit_code |= 0x80;
+	if (retval)
+		current->signal->group_exit_code |= 0x80;
 close_fail:
 	filp_close(file, NULL);
 fail_unlock:
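
The stack-layout arithmetic in the setup_arg_pages() hunks above is easier to follow with concrete numbers. The stand-alone user-space sketch below mirrors the non-CONFIG_STACK_GROWSUP path of the patched code; PAGE_SIZE, MAX_ARG_PAGES, the stack_top value and the leftover bprm->p offset are assumptions chosen for illustration only, not values taken from this tree.

#include <stdio.h>

#define PAGE_SIZE		4096UL			/* assumed 4K pages */
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define MAX_ARG_PAGES		32			/* assumed, for illustration */
#define EXTRA_STACK_VM_PAGES	20			/* same headroom constant the patch adds */

int main(void)
{
	/* Assumed inputs: an illustrative stack top, and a bprm->p as left by
	 * copy_strings(), i.e. an offset from the end of the MAX_ARG_PAGES
	 * area after ~200 bytes of argument/environment strings were copied. */
	unsigned long stack_top = 0xc0000000UL;
	unsigned long p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *) - 200;
	unsigned long stack_base, arg_start, arg_size, vm_start, vm_end;

	/* Non-CONFIG_STACK_GROWSUP path, as in the patched setup_arg_pages(): */
	stack_base = stack_top - MAX_ARG_PAGES * PAGE_SIZE;
	p += stack_base;				/* bprm->p becomes an absolute address */
	arg_start = p;					/* becomes mm->arg_start */
	arg_size = stack_top - (PAGE_MASK & arg_start);
	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;	/* extra room for stack growth */

	vm_end = stack_top;				/* mpnt->vm_end */
	vm_start = vm_end - arg_size;			/* mpnt->vm_start */

	printf("arg_start  = %#lx\n", arg_start);
	printf("stack vma  = [%#lx, %#lx)\n", vm_start, vm_end);
	return 0;
}

Running it shows mm->arg_start landing just below stack_top and the stack VMA extended downward by the EXTRA_STACK_VM_PAGES of headroom the patch introduces.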
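
The get_task_comm() hunk switches memcpy() to strncpy() when copying tsk->comm. A plausible reading is that strncpy() stops at the terminating NUL and zero-fills the remainder, so stale bytes left over from a longer previous name are not copied out; the short user-space sketch below only illustrates that behavioural difference (TASK_COMM_LEN and the example names are assumptions, not taken from this tree).

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16	/* assumed size of tsk->comm */

int main(void)
{
	/* Simulate a comm[] buffer that previously held a longer name:
	 * bytes after the terminating NUL of the new, shorter name still
	 * contain leftovers of the old one. */
	char comm[TASK_COMM_LEN] = "old_long_name";
	char with_memcpy[TASK_COMM_LEN], with_strncpy[TASK_COMM_LEN];

	strcpy(comm, "sh");				/* shorter name; stale bytes remain after "sh\0" */

	memcpy(with_memcpy, comm, sizeof(comm));	/* copies the stale tail verbatim */
	strncpy(with_strncpy, comm, sizeof(comm));	/* stops at the NUL, zero-fills the rest */

	printf("memcpy  copy, byte 4: %#x\n", (unsigned)(unsigned char)with_memcpy[4]);
	printf("strncpy copy, byte 4: %#x\n", (unsigned)(unsigned char)with_strncpy[4]);
	return 0;
}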