#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
+#include <linux/acct.h>
+#include <linux/cn_proc.h>
#include <linux/vs_memory.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
int core_uses_pid;
char core_pattern[65] = "core";
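+/*
+ * suid_dumpable: 0 suppresses core dumps of set-id or otherwise
+ * unreadable binaries (historic default), 1 dumps them owned by the
+ * user (debug), 2 dumps them readable by root only (suidsafe).
+ */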
+int suid_dumpable = 0;
+
+EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */
static struct linux_binfmt *formats;
-static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
struct nameidata nd;
int error;
- nd.intent.open.flags = FMODE_READ;
- error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+ error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
if (error)
goto out;
if (!S_ISREG(nd.dentry->d_inode->i_mode))
goto exit;
- error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
+ error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
if (error)
goto exit;
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
out:
return error;
exit:
+ release_open_intent(&nd);
path_release(&nd);
goto out;
}
argv++;
if(++i > max)
return -E2BIG;
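+ /* argument lists can be huge; give other tasks a chance to run */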
+ cond_resched();
}
}
return i;
* memory to free pages in kernel mem. These are in a format ready
* to be put directly into the top of new user memory.
*/
-int copy_strings(int argc,char __user * __user * argv, struct linux_binprm *bprm)
+static int copy_strings(int argc, char __user * __user * argv,
+ struct linux_binprm *bprm)
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
struct page *page, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- pgd_t * pgd;
- pmd_t * pmd;
pte_t * pte;
+ spinlock_t *ptl;
if (unlikely(anon_vma_prepare(vma)))
- goto out_sig;
+ goto out;
flush_dcache_page(page);
- pgd = pgd_offset(mm, address);
-
- spin_lock(&mm->page_table_lock);
- pmd = pmd_alloc(mm, pgd, address);
- if (!pmd)
- goto out;
- pte = pte_alloc_map(mm, pmd, address);
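+ /* get_locked_pte() allocates any missing page-table levels and
+ * returns the pte mapped, with its page-table lock held in *ptl */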
+ pte = get_locked_pte(mm, address, &ptl);
if (!pte)
goto out;
if (!pte_none(*pte)) {
- pte_unmap(pte);
+ pte_unmap_unlock(pte, ptl);
goto out;
}
- // mm->rss++;
- vx_rsspages_inc(mm);
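+ /* one more anonymous page charged to this mm */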
+ inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page);
- set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
+ set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot))));
- page_add_anon_rmap(page, vma, address);
- pte_unmap(pte);
- spin_unlock(&mm->page_table_lock);
+ page_add_new_anon_rmap(page, vma, address);
+ pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */
return;
out:
- spin_unlock(&mm->page_table_lock);
-out_sig:
__free_page(page);
force_sig(SIGKILL, current);
}
-int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
+#define EXTRA_STACK_VM_PAGES 20 /* random */
+
+int setup_arg_pages(struct linux_binprm *bprm,
+ unsigned long stack_top,
+ int executable_stack)
{
unsigned long stack_base;
struct vm_area_struct *mpnt;
struct mm_struct *mm = current->mm;
- int i;
+ int i, ret;
long arg_size;
#ifdef CONFIG_STACK_GROWSUP
memmove(to, to + offset, PAGE_SIZE - offset);
kunmap(bprm->page[j - 1]);
- /* Adjust bprm->p to point to the end of the strings. */
- bprm->p = PAGE_SIZE * i - offset;
-
/* Limit stack size to 1GB */
- stack_base = current->rlim[RLIMIT_STACK].rlim_max;
+ stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
if (stack_base > (1 << 30))
stack_base = 1 << 30;
- stack_base = PAGE_ALIGN(STACK_TOP - stack_base);
+ stack_base = PAGE_ALIGN(stack_top - stack_base);
+
+ /* Adjust bprm->p to point to the end of the strings. */
+ bprm->p = stack_base + PAGE_SIZE * i - offset;
mm->arg_start = stack_base;
arg_size = i << PAGE_SHIFT;
while (i < MAX_ARG_PAGES)
bprm->page[i++] = NULL;
#else
- stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
- mm->arg_start = bprm->p + stack_base;
- arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start);
+ stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
+ stack_base = PAGE_ALIGN(stack_base);
+ bprm->p += stack_base;
+ mm->arg_start = bprm->p;
+ arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif
- bprm->p += stack_base;
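+ /* size the stack vma generously so early stack growth
+ * does not need an immediate expansion */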
+ arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+
if (bprm->loader)
bprm->loader += stack_base;
bprm->exec += stack_base;
if (!mpnt)
return -ENOMEM;
- if (security_vm_enough_memory(arg_size >> PAGE_SHIFT) ||
- !vx_vmpages_avail(mm, arg_size >> PAGE_SHIFT)) {
- kmem_cache_free(vm_area_cachep, mpnt);
- return -ENOMEM;
- }
-
memset(mpnt, 0, sizeof(*mpnt));
down_write(&mm->mmap_sem);
mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
mpnt->vm_start = stack_base;
- mpnt->vm_end = PAGE_MASK &
- (PAGE_SIZE - 1 + (unsigned long) bprm->p);
+ mpnt->vm_end = stack_base + arg_size;
#else
- mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
- mpnt->vm_end = STACK_TOP;
+ mpnt->vm_end = stack_top;
+ mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
/* Adjust stack execute permissions; explicitly enable
* for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
mpnt->vm_flags = VM_STACK_FLAGS;
mpnt->vm_flags |= mm->def_flags;
mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
- insert_vm_struct(mm, mpnt);
- // mm->stack_vm = mm->total_vm = vma_pages(mpnt);
+ if ((ret = insert_vm_struct(mm, mpnt))) {
+ up_write(&mm->mmap_sem);
+ kmem_cache_free(vm_area_cachep, mpnt);
+ return ret;
+ }
vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
mm->stack_vm = mm->total_vm;
}
int err;
struct file *file;
- nd.intent.open.flags = FMODE_READ;
- err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+ err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
file = ERR_PTR(err);
if (!err) {
file = ERR_PTR(-EACCES);
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
- int err = permission(inode, MAY_EXEC, &nd);
+ int err = vfs_permission(&nd, MAY_EXEC);
if (!err && !(inode->i_mode & 0111))
err = -EACCES;
file = ERR_PTR(err);
if (!err) {
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
if (!IS_ERR(file)) {
err = deny_write_access(file);
if (err) {
return file;
}
}
+ release_open_intent(&nd);
path_release(&nd);
}
goto out;
struct task_struct *tsk;
struct mm_struct * old_mm, *active_mm;
- /* Add it to the list of mm's */
- spin_lock(&mmlist_lock);
- list_add(&mm->mmlist, &init_mm.mmlist);
- mmlist_nr++;
- spin_unlock(&mmlist_lock);
-
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
old_mm = current->mm;
mm_release(tsk, old_mm);
+ if (old_mm) {
+ /*
+ * Make sure that if there is a core dump in progress
+ * for the old mm, we get out and die instead of going
+ * through with the exec. We must hold mmap_sem around
+ * checking core_waiters and changing tsk->mm. The
+ * core-inducing thread will increment core_waiters for
+ * each thread whose ->mm == old_mm.
+ */
+ down_read(&old_mm->mmap_sem);
+ if (unlikely(old_mm->core_waiters)) {
+ up_read(&old_mm->mmap_sem);
+ return -EINTR;
+ }
+ }
task_lock(tsk);
active_mm = tsk->active_mm;
tsk->mm = mm;
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
- if (active_mm != old_mm) BUG();
+ up_read(&old_mm->mmap_sem);
+ BUG_ON(active_mm != old_mm);
mmput(old_mm);
return 0;
}
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
-static inline int de_thread(struct task_struct *tsk)
+static int de_thread(struct task_struct *tsk)
{
- struct signal_struct *newsig, *oldsig = tsk->signal;
+ struct signal_struct *sig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
+ struct task_struct *leader = NULL;
int count;
/*
* If we don't share sighandlers, then we aren't sharing anything
* and we can just re-use it all.
*/
- if (atomic_read(&oldsighand->count) <= 1)
+ if (atomic_read(&oldsighand->count) <= 1) {
+ BUG_ON(atomic_read(&sig->count) != 1);
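+ /* POSIX timers do not survive an exec; delete them before we return */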
+ exit_itimers(sig);
return 0;
+ }
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
if (!newsighand)
return -ENOMEM;
- spin_lock_init(&newsighand->siglock);
- atomic_set(&newsighand->count, 1);
- memcpy(newsighand->action, oldsighand->action, sizeof(newsighand->action));
-
- /*
- * See if we need to allocate a new signal structure
- */
- newsig = NULL;
- if (atomic_read(&oldsig->count) > 1) {
- newsig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
- if (!newsig) {
- kmem_cache_free(sighand_cachep, newsighand);
- return -ENOMEM;
- }
- atomic_set(&newsig->count, 1);
- newsig->group_exit = 0;
- newsig->group_exit_code = 0;
- newsig->group_exit_task = NULL;
- newsig->group_stop_count = 0;
- newsig->curr_target = NULL;
- init_sigpending(&newsig->shared_pending);
- INIT_LIST_HEAD(&newsig->posix_timers);
-
- newsig->tty = oldsig->tty;
- newsig->pgrp = oldsig->pgrp;
- newsig->session = oldsig->session;
- newsig->leader = oldsig->leader;
- newsig->tty_old_pgrp = oldsig->tty_old_pgrp;
- }
-
if (thread_group_empty(current))
goto no_thread_group;
*/
read_lock(&tasklist_lock);
spin_lock_irq(lock);
- if (oldsig->group_exit) {
+ if (sig->flags & SIGNAL_GROUP_EXIT) {
/*
* Another group action in progress, just
* return so that the signal is processed.
spin_unlock_irq(lock);
read_unlock(&tasklist_lock);
kmem_cache_free(sighand_cachep, newsighand);
- if (newsig)
- kmem_cache_free(signal_cachep, newsig);
return -EAGAIN;
}
- oldsig->group_exit = 1;
+
+ /*
+ * child_reaper ignores SIGKILL, change it now.
+ * Reparenting needs write_lock on tasklist_lock,
+ * so it is safe to do it under read_lock.
+ */
+ if (unlikely(current->group_leader == child_reaper))
+ child_reaper = current;
+
zap_other_threads(current);
read_unlock(&tasklist_lock);
/*
* Account for the thread group leader hanging around:
*/
- count = 2;
- if (current->pid == current->tgid)
- count = 1;
- while (atomic_read(&oldsig->count) > count) {
- oldsig->group_exit_task = current;
- oldsig->notify_count = count;
+ count = 1;
+ if (!thread_group_leader(current)) {
+ count = 2;
+ /*
+ * The SIGALRM timer survives the exec, but needs to point
+ * at us as the new group leader now. We have a race with
+ * a timer firing now getting the old leader, so we need to
+ * synchronize with any firing (by calling del_timer_sync)
+ * before we can safely let the old group leader die.
+ */
+ sig->tsk = current;
+ spin_unlock_irq(lock);
+ if (hrtimer_cancel(&sig->real_timer))
+ hrtimer_restart(&sig->real_timer);
+ spin_lock_irq(lock);
+ }
+ while (atomic_read(&sig->count) > count) {
+ sig->group_exit_task = current;
+ sig->notify_count = count;
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock);
schedule();
spin_lock_irq(lock);
}
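+ /* all other threads have exited; clear the notification handshake */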
+ sig->group_exit_task = NULL;
+ sig->notify_count = 0;
spin_unlock_irq(lock);
/*
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
- if (current->pid != current->tgid) {
- struct task_struct *leader = current->group_leader, *parent;
+ if (!thread_group_leader(current)) {
struct dentry *proc_dentry1, *proc_dentry2;
- unsigned long state, ptrace;
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
* of the time.
*/
- while (leader->state != TASK_ZOMBIE)
+ leader = current->group_leader;
+ while (leader->exit_state != EXIT_ZOMBIE)
yield();
+ /*
+ * The only record we have of the real-time age of a
+ * process, regardless of execs it's done, is start_time.
+ * All the past CPU time is accumulated in signal_struct
+ * from sister threads now dead. But in this non-leader
+ * exec, nothing survives from the original leader thread,
+ * whose birth marks the true age of this process now.
+ * When we take on its identity by switching to its PID, we
+ * also take its birthdate (always earlier than our own).
+ */
+ current->start_time = leader->start_time;
+
spin_lock(&leader->proc_lock);
spin_lock(&current->proc_lock);
proc_dentry1 = proc_pid_unhash(current);
proc_dentry2 = proc_pid_unhash(leader);
write_lock_irq(&tasklist_lock);
- if (leader->tgid != current->tgid)
- BUG();
- if (current->pid == current->tgid)
- BUG();
+ BUG_ON(leader->tgid != current->tgid);
+ BUG_ON(current->pid == current->tgid);
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
- ptrace = leader->ptrace;
- parent = leader->parent;
- ptrace_unlink(current);
- ptrace_unlink(leader);
- remove_parent(current);
- remove_parent(leader);
-
- switch_exec_pids(leader, current);
+ /* Become a process group leader with the old leader's pid.
+ * Note: The old leader also uses this pid until release_task
+ * is called. Odd but simple and correct.
+ */
+ detach_pid(current, PIDTYPE_PID);
+ current->pid = leader->pid;
+ attach_pid(current, PIDTYPE_PID, current->pid);
+ attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
+ attach_pid(current, PIDTYPE_SID, current->signal->session);
+ list_add_tail_rcu(&current->tasks, &init_task.tasks);
- current->parent = current->real_parent = leader->real_parent;
- leader->parent = leader->real_parent = child_reaper;
current->group_leader = current;
- leader->group_leader = leader;
+ leader->group_leader = current;
- add_parent(current, current->parent);
- add_parent(leader, leader->parent);
- if (ptrace) {
- current->ptrace = ptrace;
- __ptrace_link(current, parent);
- }
+ /* Reduce leader to a thread */
+ detach_pid(leader, PIDTYPE_PGID);
+ detach_pid(leader, PIDTYPE_SID);
+ list_del_init(&leader->tasks);
- list_del(&current->tasks);
- list_add_tail(&current->tasks, &init_task.tasks);
current->exit_signal = SIGCHLD;
- state = leader->state;
+
+ BUG_ON(leader->exit_state != EXIT_ZOMBIE);
+ leader->exit_state = EXIT_DEAD;
write_unlock_irq(&tasklist_lock);
spin_unlock(&leader->proc_lock);
spin_unlock(&current->proc_lock);
proc_pid_flush(proc_dentry1);
proc_pid_flush(proc_dentry2);
-
- if (state != TASK_ZOMBIE)
- BUG();
- release_task(leader);
}
+ /*
+ * There may be one thread left which is just exiting,
+ * but it's safe to stop telling the group to kill themselves.
+ */
+ sig->flags = 0;
+
no_thread_group:
+ exit_itimers(sig);
+ if (leader)
+ release_task(leader);
- write_lock_irq(&tasklist_lock);
- spin_lock(&oldsighand->siglock);
- spin_lock(&newsighand->siglock);
-
- if (current == oldsig->curr_target)
- oldsig->curr_target = next_thread(current);
- if (newsig)
- current->signal = newsig;
- current->sighand = newsighand;
- init_sigpending(¤t->pending);
- recalc_sigpending();
-
- spin_unlock(&newsighand->siglock);
- spin_unlock(&oldsighand->siglock);
- write_unlock_irq(&tasklist_lock);
-
- if (newsig && atomic_dec_and_test(&oldsig->count)) {
- exit_itimers(oldsig);
- kmem_cache_free(signal_cachep, oldsig);
- }
+ BUG_ON(atomic_read(&sig->count) != 1);
- if (atomic_dec_and_test(&oldsighand->count))
- kmem_cache_free(sighand_cachep, oldsighand);
+ if (atomic_read(&oldsighand->count) == 1) {
+ /*
+ * Now that we nuked the rest of the thread group,
+ * it turns out we are not sharing sighand any more either.
+ * So we can just keep it.
+ */
+ kmem_cache_free(sighand_cachep, newsighand);
+ } else {
+ /*
+ * Move our state over to newsighand and switch it in.
+ */
+ atomic_set(&newsighand->count, 1);
+ memcpy(newsighand->action, oldsighand->action,
+ sizeof(newsighand->action));
+
+ write_lock_irq(&tasklist_lock);
+ spin_lock(&oldsighand->siglock);
+ spin_lock(&newsighand->siglock);
+
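+ /* publish the new sighand pointer for RCU readers */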
+ rcu_assign_pointer(current->sighand, newsighand);
+ recalc_sigpending();
+
+ spin_unlock(&newsighand->siglock);
+ spin_unlock(&oldsighand->siglock);
+ write_unlock_irq(&tasklist_lock);
- if (!thread_group_empty(current))
- BUG();
- if (current->tgid != current->pid)
- BUG();
+ if (atomic_dec_and_test(&oldsighand->count))
+ kmem_cache_free(sighand_cachep, oldsighand);
+ }
+
+ BUG_ON(!thread_group_leader(current));
return 0;
}
* so that a new one can be started
*/
-static inline void flush_old_files(struct files_struct * files)
+static void flush_old_files(struct files_struct * files)
{
long j = -1;
+ struct fdtable *fdt;
spin_lock(&files->file_lock);
for (;;) {
j++;
i = j * __NFDBITS;
- if (i >= files->max_fds || i >= files->max_fdset)
+ fdt = files_fdtable(files);
+ if (i >= fdt->max_fds || i >= fdt->max_fdset)
break;
- set = files->close_on_exec->fds_bits[j];
+ set = fdt->close_on_exec->fds_bits[j];
if (!set)
continue;
- files->close_on_exec->fds_bits[j] = 0;
+ fdt->close_on_exec->fds_bits[j] = 0;
spin_unlock(&files->file_lock);
for ( ; set ; i++,set >>= 1) {
if (set & 1) {
{
/* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
- memcpy(buf, tsk->comm, sizeof(tsk->comm));
+ strncpy(buf, tsk->comm, sizeof(tsk->comm));
task_unlock(tsk);
}
if (current->euid == current->uid && current->egid == current->gid)
current->mm->dumpable = 1;
+ else
+ current->mm->dumpable = suid_dumpable;
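+ /* a credential-changing exec is only as dumpable as the sysctl allows */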
+
name = bprm->filename;
+
+ /* Copy the binary name from after the last slash */
for (i=0; (ch = *(name++)) != '\0';) {
if (ch == '/')
- i = 0;
+ i = 0; /* overwrite what we wrote */
else
if (i < (sizeof(tcomm) - 1))
tcomm[i++] = ch;
tcomm[i] = '\0';
set_task_comm(current, tcomm);
+ current->flags &= ~PF_RANDOMIZE;
flush_thread();
+ /* Set the new mm task size. We have to do that late because it may
+ * depend on TIF_32BIT which is only updated in flush_thread() on
+ * some architectures like powerpc
+ */
+ current->mm->task_size = TASK_SIZE;
+
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
- permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP))
- current->mm->dumpable = 0;
+ file_permission(bprm->file, MAY_READ) ||
+ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
+ suid_keys(current);
+ current->mm->dumpable = suid_dumpable;
+ }
/* An exec changes our domain. We are no longer part of the thread
group */
mode = inode->i_mode;
/*
* Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
- * vfs_permission lets a non-executable through
+ * generic_permission lets a non-executable through
*/
if (!(mode & 0111)) /* with at least _one_ execute bit set */
return -EACCES;
EXPORT_SYMBOL(prepare_binprm);
-static inline int unsafe_exec(struct task_struct *p)
+static int unsafe_exec(struct task_struct *p)
{
int unsafe = 0;
if (p->ptrace & PT_PTRACED) {
void compute_creds(struct linux_binprm *bprm)
{
int unsafe;
+
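+ /* give the task fresh keyrings for the new image; an e_uid
+ * change also needs the suid treatment (see security/keys) */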
+ if (bprm->e_uid != current->uid)
+ suid_keys(current);
+ exec_keys(current);
+
task_lock(current);
unsafe = unsafe_exec(current);
security_bprm_apply_creds(bprm, unsafe);
task_unlock(current);
+ security_bprm_post_apply_creds(bprm);
}
EXPORT_SYMBOL(compute_creds);
*/
int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
{
- int try,retval=0;
+ int try,retval;
struct linux_binfmt *fmt;
#ifdef __alpha__
/* handle /sbin/loader.. */
/* kernel module loader fixup */
/* so we don't try to load run modprobe in kernel space. */
set_fs(USER_DS);
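+ /* returned as-is if no binfmt recognizes the image */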
+ retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
for (fmt = formats ; fmt ; fmt = fmt->next) {
fput(bprm->file);
bprm->file = NULL;
current->did_exec = 1;
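+ /* let the process-events connector broadcast the exec */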
+ proc_exec_connector(current);
return retval;
}
read_lock(&binfmt_lock);
int retval;
int i;
- file = open_exec(filename);
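+ /* allocate the zeroed bprm before opening the executable;
+ * failures below then simply free it */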
+ retval = -ENOMEM;
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ if (!bprm)
+ goto out_ret;
+ file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
- return retval;
+ goto out_kfree;
sched_exec();
- retval = -ENOMEM;
- bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
- if (!bprm)
- goto out_ret;
- memset(bprm, 0, sizeof(*bprm));
-
bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
bprm->file = file;
bprm->filename = filename;
bprm->interp = filename;
bprm->mm = mm_alloc();
+ retval = -ENOMEM;
if (!bprm->mm)
goto out_file;
/* execve success */
security_bprm_free(bprm);
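+ /* fold the final VM statistics into the accounting integrals */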
+ acct_update_integrals(current);
kfree(bprm);
return retval;
}
allow_write_access(bprm->file);
fput(bprm->file);
}
+
+out_kfree:
kfree(bprm);
out_ret:
return retval;
}
-EXPORT_SYMBOL(do_execve);
-
int set_binfmt(struct linux_binfmt *new)
{
struct linux_binfmt *old = current->binfmt;
case 'h':
down_read(&uts_sem);
rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", system_utsname.nodename);
+ "%s", vx_new_uts(nodename));
up_read(&uts_sem);
if (rc > out_end - out_ptr)
goto out;
struct task_struct *g, *p;
struct task_struct *tsk = current;
struct completion *vfork_done = tsk->vfork_done;
+ int traced = 0;
/*
* Make sure nobody is waiting for us to release the VM,
if (mm == p->mm && p != tsk) {
force_sig_specific(SIGKILL, p);
mm->core_waiters++;
+ if (unlikely(p->ptrace) &&
+ unlikely(p->parent->mm == mm))
+ traced = 1;
}
while_each_thread(g,p);
read_unlock(&tasklist_lock);
+
+ if (unlikely(traced)) {
+ /*
+ * We are zapping a thread and the thread it ptraces.
+ * If the tracee went into a ptrace stop for exit tracing,
+ * we could deadlock since the tracer is waiting for this
+ * coredump to finish. Detach them so they can both die.
+ */
+ write_lock_irq(&tasklist_lock);
+ do_each_thread(g,p) {
+ if (mm == p->mm && p != tsk &&
+ p->ptrace && p->parent->mm == mm) {
+ __ptrace_detach(p, 0);
+ }
+ } while_each_thread(g,p);
+ write_unlock_irq(&tasklist_lock);
+ }
}
static void coredump_wait(struct mm_struct *mm)
{
DECLARE_COMPLETION(startup_done);
+ int core_waiters;
- mm->core_waiters++; /* let other threads block */
mm->core_startup_done = &startup_done;
- /* give other threads a chance to run: */
- yield();
-
zap_threads(mm);
- if (--mm->core_waiters) {
- up_write(&mm->mmap_sem);
+ core_waiters = mm->core_waiters;
+ up_write(&mm->mmap_sem);
+
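+ /* sleep without mmap_sem: the threads we wait for take it
+ * on their way out (exit_mm) */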
+ if (core_waiters)
wait_for_completion(&startup_done);
- } else
- up_write(&mm->mmap_sem);
BUG_ON(mm->core_waiters);
}
struct inode * inode;
struct file * file;
int retval = 0;
+ int fsuid = current->fsuid;
+ int flag = 0;
binfmt = current->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
+ if (current->tux_exit)
+ current->tux_exit();
down_write(&mm->mmap_sem);
if (!mm->dumpable) {
up_write(&mm->mmap_sem);
goto fail;
}
+
+ /*
+ * We cannot trust fsuid as being the "true" uid of the
+ * process nor do we know its entire history. We only know it
+ * was tainted so we dump it as root in mode 2.
+ */
+ if (mm->dumpable == 2) { /* Setuid core dump mode */
+ flag = O_EXCL; /* Stop rewrite attacks */
+ current->fsuid = 0; /* Dump root private */
+ }
mm->dumpable = 0;
+
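+ /* only one thread gets to dump; if a group exit is already
+ * in progress, back off */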
+ retval = -EAGAIN;
+ spin_lock_irq(&current->sighand->siglock);
+ if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
+ current->signal->flags = SIGNAL_GROUP_EXIT;
+ current->signal->group_exit_code = exit_code;
+ current->signal->group_stop_count = 0;
+ retval = 0;
+ }
+ spin_unlock_irq(&current->sighand->siglock);
+ if (retval) {
+ up_write(&mm->mmap_sem);
+ goto fail;
+ }
+
init_completion(&mm->core_done);
- current->signal->group_exit = 1;
- current->signal->group_exit_code = exit_code;
coredump_wait(mm);
- if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+ /*
+ * Clear any false indication of pending signals that might
+ * be seen by the filesystem code called to write the core file.
+ */
+ clear_thread_flag(TIF_SIGPENDING);
+
+ if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
goto fail_unlock;
/*
lock_kernel();
format_corename(corename, core_pattern, signr);
unlock_kernel();
- file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+ file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
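+ /* "2" is O_RDWR; in setuid mode, "flag" adds O_EXCL so an
+ * existing file cannot be reused */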
if (IS_ERR(file))
goto fail_unlock;
inode = file->f_dentry->d_inode;
goto close_fail;
if (!file->f_op->write)
goto close_fail;
- if (do_truncate(file->f_dentry, 0) != 0)
+ if (do_truncate(file->f_dentry, 0, 0, file) != 0)
goto close_fail;
retval = binfmt->core_dump(signr, regs, file);
- current->signal->group_exit_code |= 0x80;
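+ /* 0x80 in the group exit code is the "core dumped" bit the
+ * parent sees via waitpid() */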
+ if (retval)
+ current->signal->group_exit_code |= 0x80;
close_fail:
filp_close(file, NULL);
fail_unlock:
+ current->fsuid = fsuid;
complete_all(&mm->core_done);
fail:
return retval;