* formats.
*/
-#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>
+#include <linux/cn_proc.h>
+#include <linux/audit.h>
+#include <linux/vs_base.h>
#include <linux/vs_memory.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
int core_uses_pid;
char core_pattern[65] = "core";
+int suid_dumpable = 0;
+
+EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */
static struct linux_binfmt *formats;
struct nameidata nd;
int error;
- nd.intent.open.flags = FMODE_READ;
- error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
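+ /* Look up with an open intent so nameidata_to_filp() below can reuse it */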
+ error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
if (error)
goto out;
if (!S_ISREG(nd.dentry->d_inode->i_mode))
goto exit;
- error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
+ error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
if (error)
goto exit;
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
out:
return error;
exit:
+ release_open_intent(&nd);
path_release(&nd);
goto out;
}
struct page *page, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- pgd_t * pgd;
- pud_t * pud;
- pmd_t * pmd;
pte_t * pte;
+ spinlock_t *ptl;
if (unlikely(anon_vma_prepare(vma)))
- goto out_sig;
+ goto out;
flush_dcache_page(page);
- pgd = pgd_offset(mm, address);
-
- spin_lock(&mm->page_table_lock);
- pud = pud_alloc(mm, pgd, address);
- if (!pud)
- goto out;
- pmd = pmd_alloc(mm, pud, address);
- if (!pmd)
- goto out;
- pte = pte_alloc_map(mm, pmd, address);
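+ /* get_locked_pte() does the pgd/pud/pmd walk and returns the pte mapped with its lock held */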
+ pte = get_locked_pte(mm, address, &ptl);
if (!pte)
goto out;
if (!pte_none(*pte)) {
- pte_unmap(pte);
+ pte_unmap_unlock(pte, ptl);
goto out;
}
- inc_mm_counter(mm, rss);
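+ /* The stack page is anonymous, so account it against the split anon_rss counter */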
+ inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page);
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot))));
- page_add_anon_rmap(page, vma, address);
- pte_unmap(pte);
- spin_unlock(&mm->page_table_lock);
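+ /* The page is new and not mapped anywhere else, so the cheaper _new_ rmap variant suffices */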
+ page_add_new_anon_rmap(page, vma, address);
+ pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */
return;
out:
- spin_unlock(&mm->page_table_lock);
-out_sig:
__free_page(page);
force_sig(SIGKILL, current);
}
if (!mpnt)
return -ENOMEM;
- if (security_vm_enough_memory(arg_size >> PAGE_SHIFT) ||
- !vx_vmpages_avail(mm, arg_size >> PAGE_SHIFT)) {
- kmem_cache_free(vm_area_cachep, mpnt);
- return -ENOMEM;
- }
-
memset(mpnt, 0, sizeof(*mpnt));
down_write(&mm->mmap_sem);
int err;
struct file *file;
- nd.intent.open.flags = FMODE_READ;
- err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
+ err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
file = ERR_PTR(err);
if (!err) {
file = ERR_PTR(-EACCES);
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
- int err = permission(inode, MAY_EXEC, &nd);
- if (!err && !(inode->i_mode & 0111))
- err = -EACCES;
+ int err = vfs_permission(&nd, MAY_EXEC);
file = ERR_PTR(err);
if (!err) {
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = nameidata_to_filp(&nd, O_RDONLY);
if (!IS_ERR(file)) {
err = deny_write_access(file);
if (err) {
return file;
}
}
+ release_open_intent(&nd);
path_release(&nd);
}
goto out;
arch_pick_mmap_layout(mm);
if (old_mm) {
up_read(&old_mm->mmap_sem);
- if (active_mm != old_mm) BUG();
+ BUG_ON(active_mm != old_mm);
mmput(old_mm);
return 0;
}
* disturbing other processes. (Other processes might share the signal
* table via the CLONE_SIGHAND option to clone().)
*/
-static inline int de_thread(struct task_struct *tsk)
+static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
+ struct task_struct *leader = NULL;
int count;
/*
kmem_cache_free(sighand_cachep, newsighand);
return -EAGAIN;
}
+
+ /*
+ * child_reaper ignores SIGKILL, change it now.
+ * Reparenting needs write_lock on tasklist_lock,
+ * so it is safe to do it under read_lock.
+ */
+ if (unlikely(current->group_leader == child_reaper))
+ child_reaper = current;
+
zap_other_threads(current);
read_unlock(&tasklist_lock);
/*
* Account for the thread group leader hanging around:
*/
- count = 2;
- if (thread_group_leader(current))
- count = 1;
+ count = 1;
+ if (!thread_group_leader(current)) {
+ count = 2;
+ /*
+ * The SIGALRM timer survives the exec, but needs to point
+ * at us as the new group leader now. We have a race with
+ * a timer firing now getting the old leader, so we need to
+ * synchronize with any firing (by calling del_timer_sync)
+ * before we can safely let the old group leader die.
+ */
+ sig->tsk = current;
+ spin_unlock_irq(lock);
+ if (hrtimer_cancel(&sig->real_timer))
+ hrtimer_restart(&sig->real_timer);
+ spin_lock_irq(lock);
+ }
while (atomic_read(&sig->count) > count) {
sig->group_exit_task = current;
sig->notify_count = count;
}
sig->group_exit_task = NULL;
sig->notify_count = 0;
- sig->real_timer.data = (unsigned long)current;
spin_unlock_irq(lock);
/*
* and to assume its PID:
*/
if (!thread_group_leader(current)) {
- struct task_struct *leader = current->group_leader, *parent;
- struct dentry *proc_dentry1, *proc_dentry2;
- unsigned long exit_state, ptrace;
-
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
* of the time.
*/
+ leader = current->group_leader;
while (leader->exit_state != EXIT_ZOMBIE)
yield();
- spin_lock(&leader->proc_lock);
- spin_lock(&current->proc_lock);
- proc_dentry1 = proc_pid_unhash(current);
- proc_dentry2 = proc_pid_unhash(leader);
+ /*
+ * The only record we have of the real-time age of a
+ * process, regardless of execs it's done, is start_time.
+ * All the past CPU time is accumulated in signal_struct
+ * from sister threads now dead. But in this non-leader
+ * exec, nothing survives from the original leader thread,
+ * whose birth marks the true age of this process now.
+ * When we take on its identity by switching to its PID, we
+ * also take its birthdate (always earlier than our own).
+ */
+ current->start_time = leader->start_time;
+
write_lock_irq(&tasklist_lock);
- if (leader->tgid != current->tgid)
- BUG();
- if (current->pid == current->tgid)
- BUG();
+ BUG_ON(leader->tgid != current->tgid);
+ BUG_ON(current->pid == current->tgid);
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
- ptrace = leader->ptrace;
- parent = leader->parent;
- if (unlikely(ptrace) && unlikely(parent == current)) {
- /*
- * Joker was ptracing his own group leader,
- * and now he wants to be his own parent!
- * We can't have that.
- */
- ptrace = 0;
- }
-
- ptrace_unlink(current);
- ptrace_unlink(leader);
- remove_parent(current);
- remove_parent(leader);
- switch_exec_pids(leader, current);
+ /* Become a process group leader with the old leader's pid.
+ * Note: The old leader also uses this pid until release_task
+ * is called. Odd but simple and correct.
+ */
+ detach_pid(current, PIDTYPE_PID);
+ current->pid = leader->pid;
+ attach_pid(current, PIDTYPE_PID, current->pid);
+ attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
+ attach_pid(current, PIDTYPE_SID, current->signal->session);
+ list_replace_rcu(&leader->tasks, &current->tasks);
- current->parent = current->real_parent = leader->real_parent;
- leader->parent = leader->real_parent = child_reaper;
current->group_leader = current;
- leader->group_leader = leader;
+ leader->group_leader = current;
- add_parent(current, current->parent);
- add_parent(leader, leader->parent);
- if (ptrace) {
- current->ptrace = ptrace;
- __ptrace_link(current, parent);
- }
+ /* Reduce leader to a thread */
+ detach_pid(leader, PIDTYPE_PGID);
+ detach_pid(leader, PIDTYPE_SID);
- list_del(&current->tasks);
- list_add_tail(&current->tasks, &init_task.tasks);
current->exit_signal = SIGCHLD;
- exit_state = leader->exit_state;
- write_unlock_irq(&tasklist_lock);
- spin_unlock(&leader->proc_lock);
- spin_unlock(&current->proc_lock);
- proc_pid_flush(proc_dentry1);
- proc_pid_flush(proc_dentry2);
+ BUG_ON(leader->exit_state != EXIT_ZOMBIE);
+ leader->exit_state = EXIT_DEAD;
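+ /* release_task(leader) is deferred until tasklist_lock is dropped; see below */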
- if (exit_state != EXIT_ZOMBIE)
- BUG();
- release_task(leader);
+ write_unlock_irq(&tasklist_lock);
}
/*
- * Now there are really no other threads at all,
- * so it's safe to stop telling them to kill themselves.
+ * There may be one thread left which is just exiting,
+ * but it's safe to stop telling the group to kill themselves.
*/
sig->flags = 0;
no_thread_group:
- BUG_ON(atomic_read(&sig->count) != 1);
exit_itimers(sig);
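+ /* Deferred from above: release_task() takes tasklist_lock itself, so it must run unlocked */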
+ if (leader)
+ release_task(leader);
+
+ BUG_ON(atomic_read(&sig->count) != 1);
if (atomic_read(&oldsighand->count) == 1) {
/*
/*
* Move our state over to newsighand and switch it in.
*/
- spin_lock_init(&newsighand->siglock);
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
- spin_lock(&newsighand->siglock);
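+ /* Both siglocks share a lock class; the nested annotation keeps lockdep quiet */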
+ spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
- current->sighand = newsighand;
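+ /* ->sighand is read under RCU elsewhere, so publish the new pointer with rcu_assign_pointer() */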
+ rcu_assign_pointer(current->sighand, newsighand);
recalc_sigpending();
spin_unlock(&newsighand->siglock);
kmem_cache_free(sighand_cachep, oldsighand);
}
- if (!thread_group_empty(current))
- BUG();
- if (!thread_group_leader(current))
- BUG();
+ BUG_ON(!thread_group_leader(current));
return 0;
}
* so that a new one can be started
*/
-static inline void flush_old_files(struct files_struct * files)
+static void flush_old_files(struct files_struct * files)
{
long j = -1;
+ struct fdtable *fdt;
spin_lock(&files->file_lock);
for (;;) {
j++;
i = j * __NFDBITS;
- if (i >= files->max_fds || i >= files->max_fdset)
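+ /* file_lock is dropped inside the loop and the fdtable may grow, so re-fetch it each pass */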
+ fdt = files_fdtable(files);
+ if (i >= fdt->max_fds || i >= fdt->max_fdset)
break;
- set = files->close_on_exec->fds_bits[j];
+ set = fdt->close_on_exec->fds_bits[j];
if (!set)
continue;
- files->close_on_exec->fds_bits[j] = 0;
+ fdt->close_on_exec->fds_bits[j] = 0;
spin_unlock(&files->file_lock);
for ( ; set ; i++,set >>= 1) {
if (set & 1) {
bprm->mm = NULL; /* We're using it now */
/* This is the point of no return */
- steal_locks(files);
put_files_struct(files);
current->sas_ss_sp = current->sas_ss_size = 0;
if (current->euid == current->uid && current->egid == current->gid)
current->mm->dumpable = 1;
+ else
+ current->mm->dumpable = suid_dumpable;
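+ /* suid_dumpable sysctl: 0 = no dump, 1 = dump as the user, 2 = root-only "suidsafe" dump */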
+
name = bprm->filename;
/* Copies the binary name from after last slash */
current->flags &= ~PF_RANDOMIZE;
flush_thread();
+ /* Set the new mm task size. We have to do that late because it may
+ * depend on TIF_32BIT which is only updated in flush_thread() on
+ * some architectures like powerpc
+ */
+ current->mm->task_size = TASK_SIZE;
+
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
- permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
+ file_permission(bprm->file, MAY_READ) ||
(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
suid_keys(current);
- current->mm->dumpable = 0;
+ current->mm->dumpable = suid_dumpable;
}
/* An exec changes our domain. We are no longer part of the thread
int retval;
mode = inode->i_mode;
- /*
- * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
- * generic_permission lets a non-executable through
- */
- if (!(mode & 0111)) /* with at least _one_ execute bit set */
- return -EACCES;
if (bprm->file->f_op == NULL)
return -EACCES;
EXPORT_SYMBOL(prepare_binprm);
-static inline int unsafe_exec(struct task_struct *p)
+static int unsafe_exec(struct task_struct *p)
{
- int unsafe = 0;
- if (p->ptrace & PT_PTRACED) {
- if (p->ptrace & PT_PTRACE_CAP)
- unsafe |= LSM_UNSAFE_PTRACE_CAP;
- else
- unsafe |= LSM_UNSAFE_PTRACE;
- }
+ int unsafe = tracehook_unsafe_exec(p);
if (atomic_read(&p->fs->count) > 1 ||
atomic_read(&p->files->count) > 1 ||
atomic_read(&p->sighand->count) > 1)
/* kernel module loader fixup */
/* so we don't try to load run modprobe in kernel space. */
set_fs(USER_DS);
+
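+ /* Record the exec arguments with the audit subsystem before looking for a handler */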
+ retval = audit_bprm(bprm);
+ if (retval)
+ return retval;
+
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
fput(bprm->file);
bprm->file = NULL;
current->did_exec = 1;
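+ /* Notify the process-events connector and any tracer of the successful exec */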
+ proc_exec_connector(current);
+ tracehook_report_exec(bprm, regs);
return retval;
}
read_lock(&binfmt_lock);
int i;
retval = -ENOMEM;
- bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm)
goto out_ret;
- memset(bprm, 0, sizeof(*bprm));
file = open_exec(filename);
retval = PTR_ERR(file);
/* execve success */
security_bprm_free(bprm);
acct_update_integrals(current);
- update_mem_hiwater(current);
kfree(bprm);
return retval;
}
case 'h':
down_read(&uts_sem);
rc = snprintf(out_ptr, out_end - out_ptr,
- "%s", system_utsname.nodename);
+ "%s", vx_new_uts(nodename));
up_read(&uts_sem);
if (rc > out_end - out_ptr)
goto out;
*out_ptr = 0;
}
-static void zap_threads (struct mm_struct *mm)
+static void zap_process(struct task_struct *start)
{
- struct task_struct *g, *p;
- struct task_struct *tsk = current;
- struct completion *vfork_done = tsk->vfork_done;
- int traced = 0;
+ struct task_struct *t;
- /*
- * Make sure nobody is waiting for us to release the VM,
- * otherwise we can deadlock when we wait on each other
- */
- if (vfork_done) {
- tsk->vfork_done = NULL;
- complete(vfork_done);
- }
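+ /* Mark the whole group as exiting and cancel any in-progress group stop */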
+ start->signal->flags = SIGNAL_GROUP_EXIT;
+ start->signal->group_stop_count = 0;
- read_lock(&tasklist_lock);
- do_each_thread(g,p)
- if (mm == p->mm && p != tsk) {
- force_sig_specific(SIGKILL, p);
- mm->core_waiters++;
- if (unlikely(p->ptrace) &&
- unlikely(p->parent->mm == mm))
- traced = 1;
+ t = start;
+ do {
+ if (t != current && t->mm) {
+ t->mm->core_waiters++;
+ sigaddset(&t->pending.signal, SIGKILL);
+ signal_wake_up(t, 1);
}
- while_each_thread(g,p);
+ } while ((t = next_thread(t)) != start);
+}
- read_unlock(&tasklist_lock);
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+ int exit_code)
+{
+ struct task_struct *g, *p;
+ unsigned long flags;
+ int err = -EAGAIN;
+
+ spin_lock_irq(&tsk->sighand->siglock);
+ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+ tsk->signal->group_exit_code = exit_code;
+ zap_process(tsk);
+ err = 0;
+ }
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (err)
+ return err;
- if (unlikely(traced)) {
- /*
- * We are zapping a thread and the thread it ptraces.
- * If the tracee went into a ptrace stop for exit tracing,
- * we could deadlock since the tracer is waiting for this
- * coredump to finish. Detach them so they can both die.
- */
- write_lock_irq(&tasklist_lock);
- do_each_thread(g,p) {
- if (mm == p->mm && p != tsk &&
- p->ptrace && p->parent->mm == mm) {
- __ptrace_unlink(p);
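+ /* If every other mm user is already counted as a core_waiter, no other process shares this mm */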
+ if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+ goto done;
+
+ rcu_read_lock();
+ for_each_process(g) {
+ if (g == tsk->group_leader)
+ continue;
+
+ p = g;
+ do {
+ if (p->mm) {
+ if (p->mm == mm) {
+ /*
+ * p->sighand can't disappear, but
+ * may be changed by de_thread()
+ */
+ lock_task_sighand(p, &flags);
+ zap_process(p);
+ unlock_task_sighand(p, &flags);
+ }
+ break;
}
- } while_each_thread(g,p);
- write_unlock_irq(&tasklist_lock);
+ } while ((p = next_thread(p)) != g);
}
+ rcu_read_unlock();
+done:
+ return mm->core_waiters;
}
-static void coredump_wait(struct mm_struct *mm)
+static int coredump_wait(int exit_code)
{
- DECLARE_COMPLETION(startup_done);
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct completion startup_done;
+ struct completion *vfork_done;
+ int core_waiters;
- mm->core_waiters++; /* let other threads block */
+ init_completion(&mm->core_done);
+ init_completion(&startup_done);
mm->core_startup_done = &startup_done;
- /* give other threads a chance to run: */
- yield();
+ core_waiters = zap_threads(tsk, mm, exit_code);
+ up_write(&mm->mmap_sem);
- zap_threads(mm);
- if (--mm->core_waiters) {
- up_write(&mm->mmap_sem);
+ if (unlikely(core_waiters < 0))
+ goto fail;
+
+ /*
+ * Make sure nobody is waiting for us to release the VM,
+ * otherwise we can deadlock when we wait on each other
+ */
+ vfork_done = tsk->vfork_done;
+ if (vfork_done) {
+ tsk->vfork_done = NULL;
+ complete(vfork_done);
+ }
+
+ if (core_waiters)
wait_for_completion(&startup_done);
- } else
- up_write(&mm->mmap_sem);
+fail:
BUG_ON(mm->core_waiters);
+ return core_waiters;
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
struct inode * inode;
struct file * file;
int retval = 0;
+ int fsuid = current->fsuid;
+ int flag = 0;
binfmt = current->binfmt;
if (!binfmt || !binfmt->core_dump)
up_write(&mm->mmap_sem);
goto fail;
}
+
+ /*
+ * We cannot trust fsuid as being the "true" uid of the
+ * process nor do we know its entire history. We only know it
+ * was tainted so we dump it as root in mode 2.
+ */
+ if (mm->dumpable == 2) { /* Setuid core dump mode */
+ flag = O_EXCL; /* Stop rewrite attacks */
+ current->fsuid = 0; /* Dump root private */
+ }
mm->dumpable = 0;
- init_completion(&mm->core_done);
- spin_lock_irq(&current->sighand->siglock);
- current->signal->flags = SIGNAL_GROUP_EXIT;
- current->signal->group_exit_code = exit_code;
- spin_unlock_irq(&current->sighand->siglock);
- coredump_wait(mm);
+
+ retval = coredump_wait(exit_code);
+ if (retval < 0)
+ goto fail;
/*
* Clear any false indication of pending signals that might
* be seen by the filesystem code called to write the core file.
*/
- current->signal->group_stop_count = 0;
clear_thread_flag(TIF_SIGPENDING);
if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
lock_kernel();
format_corename(corename, core_pattern, signr);
unlock_kernel();
- file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+ file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
if (IS_ERR(file))
goto fail_unlock;
inode = file->f_dentry->d_inode;
goto close_fail;
if (!file->f_op->write)
goto close_fail;
- if (do_truncate(file->f_dentry, 0) != 0)
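+ /* do_truncate() now takes the time-attribute flags and the open file as well */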
+ if (do_truncate(file->f_dentry, 0, 0, file) != 0)
goto close_fail;
retval = binfmt->core_dump(signr, regs, file);
close_fail:
filp_close(file, NULL);
fail_unlock:
+ current->fsuid = fsuid;
complete_all(&mm->core_done);
fail:
return retval;