* formats.
*/
+#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>
-#include <linux/audit.h>
-#include <linux/vs_base.h>
#include <linux/vs_memory.h>
#include <linux/vs_cvirt.h>
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
int err = vfs_permission(&nd, MAY_EXEC);
+ if (!err && !(inode->i_mode & 0111))
+ err = -EACCES;
file = ERR_PTR(err);
if (!err) {
file = nameidata_to_filp(&nd, O_RDONLY);
* and to assume its PID:
*/
if (!thread_group_leader(current)) {
+ struct dentry *proc_dentry1, *proc_dentry2;
+
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
*/
current->start_time = leader->start_time;
+ spin_lock(&leader->proc_lock);
+ spin_lock(&current->proc_lock);
+ proc_dentry1 = proc_pid_unhash(current);
+ proc_dentry2 = proc_pid_unhash(leader);
write_lock_irq(&tasklist_lock);
BUG_ON(leader->tgid != current->tgid);
attach_pid(current, PIDTYPE_PID, current->pid);
attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
attach_pid(current, PIDTYPE_SID, current->signal->session);
- list_replace_rcu(&leader->tasks, &current->tasks);
+ list_add_tail_rcu(&current->tasks, &init_task.tasks);
current->group_leader = current;
leader->group_leader = current;
/* Reduce leader to a thread */
detach_pid(leader, PIDTYPE_PGID);
detach_pid(leader, PIDTYPE_SID);
+ list_del_init(&leader->tasks);
current->exit_signal = SIGCHLD;
leader->exit_state = EXIT_DEAD;
write_unlock_irq(&tasklist_lock);
+ spin_unlock(&leader->proc_lock);
+ spin_unlock(&current->proc_lock);
+ proc_pid_flush(proc_dentry1);
+ proc_pid_flush(proc_dentry2);
}
/*
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
- spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
+ spin_lock(&newsighand->siglock);
rcu_assign_pointer(current->sighand, newsighand);
recalc_sigpending();
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
+ steal_locks(files);
put_files_struct(files);
current->sas_ss_sp = current->sas_ss_size = 0;
int retval;
mode = inode->i_mode;
+ /*
+ * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
+ * generic_permission lets a non-executable through
+ */
+ if (!(mode & 0111)) /* with at least _one_ execute bit set */
+ return -EACCES;
if (bprm->file->f_op == NULL)
return -EACCES;
/* kernel module loader fixup */
/* so we don't try to load run modprobe in kernel space. */
set_fs(USER_DS);
-
- retval = audit_bprm(bprm);
- if (retval)
- return retval;
-
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
*out_ptr = 0;
}
-static void zap_process(struct task_struct *start)
-{
- struct task_struct *t;
-
- start->signal->flags = SIGNAL_GROUP_EXIT;
- start->signal->group_stop_count = 0;
-
- t = start;
- do {
- if (t != current && t->mm) {
- t->mm->core_waiters++;
- sigaddset(&t->pending.signal, SIGKILL);
- signal_wake_up(t, 1);
- }
- } while ((t = next_thread(t)) != start);
-}
-
-static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
- int exit_code)
+static void zap_threads (struct mm_struct *mm)
{
struct task_struct *g, *p;
- unsigned long flags;
- int err = -EAGAIN;
-
- spin_lock_irq(&tsk->sighand->siglock);
- if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
- tsk->signal->group_exit_code = exit_code;
- zap_process(tsk);
- err = 0;
+ struct task_struct *tsk = current;
+ struct completion *vfork_done = tsk->vfork_done;
+ int traced = 0;
+
+ /*
+ * Make sure nobody is waiting for us to release the VM,
+ * otherwise we can deadlock when we wait on each other
+ */
+ if (vfork_done) {
+ tsk->vfork_done = NULL;
+ complete(vfork_done);
}
- spin_unlock_irq(&tsk->sighand->siglock);
- if (err)
- return err;
- if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
- goto done;
+ read_lock(&tasklist_lock);
+ do_each_thread(g,p)
+ if (mm == p->mm && p != tsk) {
+ force_sig_specific(SIGKILL, p);
+ mm->core_waiters++;
+ if (unlikely(p->ptrace) &&
+ unlikely(p->parent->mm == mm))
+ traced = 1;
+ }
+ while_each_thread(g,p);
- rcu_read_lock();
- for_each_process(g) {
- if (g == tsk->group_leader)
- continue;
+ read_unlock(&tasklist_lock);
- p = g;
- do {
- if (p->mm) {
- if (p->mm == mm) {
- /*
- * p->sighand can't disappear, but
- * may be changed by de_thread()
- */
- lock_task_sighand(p, &flags);
- zap_process(p);
- unlock_task_sighand(p, &flags);
- }
- break;
+ if (unlikely(traced)) {
+ /*
+ * We are zapping a thread and the thread it ptraces.
+ * If the tracee went into a ptrace stop for exit tracing,
+ * we could deadlock since the tracer is waiting for this
+ * coredump to finish. Detach them so they can both die.
+ */
+ write_lock_irq(&tasklist_lock);
+ do_each_thread(g,p) {
+ if (mm == p->mm && p != tsk &&
+ p->ptrace && p->parent->mm == mm) {
+ __ptrace_detach(p, 0);
}
- } while ((p = next_thread(p)) != g);
+ } while_each_thread(g,p);
+ write_unlock_irq(&tasklist_lock);
}
- rcu_read_unlock();
-done:
- return mm->core_waiters;
}
-static int coredump_wait(int exit_code)
+static void coredump_wait(struct mm_struct *mm)
{
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- struct completion startup_done;
- struct completion *vfork_done;
+ DECLARE_COMPLETION(startup_done);
int core_waiters;
- init_completion(&mm->core_done);
- init_completion(&startup_done);
mm->core_startup_done = &startup_done;
- core_waiters = zap_threads(tsk, mm, exit_code);
+ zap_threads(mm);
+ core_waiters = mm->core_waiters;
up_write(&mm->mmap_sem);
- if (unlikely(core_waiters < 0))
- goto fail;
-
- /*
- * Make sure nobody is waiting for us to release the VM,
- * otherwise we can deadlock when we wait on each other
- */
- vfork_done = tsk->vfork_done;
- if (vfork_done) {
- tsk->vfork_done = NULL;
- complete(vfork_done);
- }
-
if (core_waiters)
wait_for_completion(&startup_done);
-fail:
BUG_ON(mm->core_waiters);
- return core_waiters;
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
}
mm->dumpable = 0;
- retval = coredump_wait(exit_code);
- if (retval < 0)
+ retval = -EAGAIN;
+ spin_lock_irq(&current->sighand->siglock);
+ if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
+ current->signal->flags = SIGNAL_GROUP_EXIT;
+ current->signal->group_exit_code = exit_code;
+ current->signal->group_stop_count = 0;
+ retval = 0;
+ }
+ spin_unlock_irq(&current->sighand->siglock);
+ if (retval) {
+ up_write(&mm->mmap_sem);
goto fail;
+ }
+
+ init_completion(&mm->core_done);
+ coredump_wait(mm);
/*
* Clear any false indication of pending signals that might