#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/vs_memory.h>
+#include <linux/acct.h>
+#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
int core_uses_pid;
char core_pattern[65] = "core";
-int suid_dumpable = 0;
-
-EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */
static struct linux_binfmt *formats;
-static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(binfmt_lock);
int register_binfmt(struct linux_binfmt * fmt)
{
argv++;
if(++i > max)
return -E2BIG;
+ cond_resched();
}
}
return i;
* memory to free pages in kernel mem. These are in a format ready
* to be put directly into the top of new user memory.
*/
-int copy_strings(int argc,char __user * __user * argv, struct linux_binprm *bprm)
+static int copy_strings(int argc, char __user * __user * argv,
+ struct linux_binprm *bprm)
{
struct page *kmapped_page = NULL;
char *kaddr = NULL;
{
struct mm_struct *mm = vma->vm_mm;
pgd_t * pgd;
+ pud_t * pud;
pmd_t * pmd;
pte_t * pte;
pgd = pgd_offset(mm, address);
spin_lock(&mm->page_table_lock);
- pmd = pmd_alloc(mm, pgd, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+ goto out;
+ pmd = pmd_alloc(mm, pud, address);
if (!pmd)
goto out;
pte = pte_alloc_map(mm, pmd, address);
pte_unmap(pte);
goto out;
}
- // mm->rss++;
- vx_rsspages_inc(mm);
+ inc_mm_counter(mm, rss);
lru_cache_add_active(page);
- set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
+ set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot))));
page_add_anon_rmap(page, vma, address);
pte_unmap(pte);
#define EXTRA_STACK_VM_PAGES 20 /* random */
-int setup_arg_pages(struct linux_binprm *bprm, int executable_stack)
+int setup_arg_pages(struct linux_binprm *bprm,
+ unsigned long stack_top,
+ int executable_stack)
{
unsigned long stack_base;
struct vm_area_struct *mpnt;
stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
if (stack_base > (1 << 30))
stack_base = 1 << 30;
- stack_base = PAGE_ALIGN(STACK_TOP - stack_base);
+ stack_base = PAGE_ALIGN(stack_top - stack_base);
/* Adjust bprm->p to point to the end of the strings. */
bprm->p = stack_base + PAGE_SIZE * i - offset;
while (i < MAX_ARG_PAGES)
bprm->page[i++] = NULL;
#else
-#ifdef __HAVE_ARCH_ALIGN_STACK
- stack_base = arch_align_stack(STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE);
+ stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
stack_base = PAGE_ALIGN(stack_base);
-#else
- stack_base = STACK_TOP - MAX_ARG_PAGES * PAGE_SIZE;
-#endif
bprm->p += stack_base;
mm->arg_start = bprm->p;
- arg_size = STACK_TOP - (PAGE_MASK & (unsigned long) mm->arg_start);
+ arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif
arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
mpnt->vm_start = stack_base;
mpnt->vm_end = stack_base + arg_size;
#else
- mpnt->vm_end = STACK_TOP;
+ mpnt->vm_end = stack_top;
mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
/* Adjust stack execute permissions; explicitly enable
kmem_cache_free(vm_area_cachep, mpnt);
return ret;
}
- // mm->stack_vm = mm->total_vm = vma_pages(mpnt);
vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
mm->stack_vm = mm->total_vm;
}
old_mm = current->mm;
mm_release(tsk, old_mm);
+ if (old_mm) {
+ /*
+ * Make sure that if there is a core dump in progress
+ * for the old mm, we get out and die instead of going
+ * through with the exec. We must hold mmap_sem around
+ * checking core_waiters and changing tsk->mm. The
+ * core-inducing thread will increment core_waiters for
+ * each thread whose ->mm == old_mm.
+ */
+ down_read(&old_mm->mmap_sem);
+ if (unlikely(old_mm->core_waiters)) {
+ up_read(&old_mm->mmap_sem);
+ return -EINTR;
+ }
+ }
task_lock(tsk);
active_mm = tsk->active_mm;
tsk->mm = mm;
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
+ up_read(&old_mm->mmap_sem);
if (active_mm != old_mm) BUG();
mmput(old_mm);
return 0;
*/
read_lock(&tasklist_lock);
spin_lock_irq(lock);
- if (sig->group_exit) {
+ if (sig->flags & SIGNAL_GROUP_EXIT) {
/*
* Another group action in progress, just
* return so that the signal is processed.
kmem_cache_free(sighand_cachep, newsighand);
return -EAGAIN;
}
- sig->group_exit = 1;
zap_other_threads(current);
read_unlock(&tasklist_lock);
* Account for the thread group leader hanging around:
*/
count = 2;
- if (current->pid == current->tgid)
+ if (thread_group_leader(current))
count = 1;
while (atomic_read(&sig->count) > count) {
sig->group_exit_task = current;
}
sig->group_exit_task = NULL;
sig->notify_count = 0;
+ sig->real_timer.data = (unsigned long)current;
spin_unlock_irq(lock);
/*
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
- if (current->pid != current->tgid) {
+ if (!thread_group_leader(current)) {
struct task_struct *leader = current->group_leader, *parent;
struct dentry *proc_dentry1, *proc_dentry2;
unsigned long exit_state, ptrace;
*/
ptrace = leader->ptrace;
parent = leader->parent;
+ if (unlikely(ptrace) && unlikely(parent == current)) {
+ /*
+ * Joker was ptracing his own group leader,
+ * and now he wants to be his own parent!
+ * We can't have that.
+ */
+ ptrace = 0;
+ }
ptrace_unlink(current);
ptrace_unlink(leader);
* Now there are really no other threads at all,
* so it's safe to stop telling them to kill themselves.
*/
- sig->group_exit = 0;
+ sig->flags = 0;
no_thread_group:
BUG_ON(atomic_read(&sig->count) != 1);
if (!thread_group_empty(current))
BUG();
- if (current->tgid != current->pid)
+ if (!thread_group_leader(current))
BUG();
return 0;
}
if (current->euid == current->uid && current->egid == current->gid)
current->mm->dumpable = 1;
- else
- current->mm->dumpable = suid_dumpable;
-
name = bprm->filename;
+
+ /* Copies the binary name from after last slash */
for (i=0; (ch = *(name++)) != '\0';) {
if (ch == '/')
- i = 0;
+ i = 0; /* overwrite what we wrote */
else
if (i < (sizeof(tcomm) - 1))
tcomm[i++] = ch;
tcomm[i] = '\0';
set_task_comm(current, tcomm);
- current->flags &= ~PF_RELOCEXEC;
+ current->flags &= ~PF_RANDOMIZE;
flush_thread();
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
suid_keys(current);
- current->mm->dumpable = suid_dumpable;
+ current->mm->dumpable = 0;
}
/* An exec changes our domain. We are no longer part of the thread
unsafe = unsafe_exec(current);
security_bprm_apply_creds(bprm, unsafe);
task_unlock(current);
+ security_bprm_post_apply_creds(bprm);
}
EXPORT_SYMBOL(compute_creds);
/* execve success */
security_bprm_free(bprm);
+ acct_update_integrals(current);
+ update_mem_hiwater(current);
kfree(bprm);
return retval;
}
read_unlock(&tasklist_lock);
- while (unlikely(traced)) {
+ if (unlikely(traced)) {
/*
* We are zapping a thread and the thread it ptraces.
- * The tracee won't come out of TASK_TRACED state until
- * its ptracer detaches. That happens when the ptracer
- * dies, but it synchronizes with us and so won't get
- * that far until we finish the core dump. If we're
- * waiting for the tracee to synchronize but it stays
- * blocked in TASK_TRACED, then we deadlock. So, for
- * this weirdo case we have to do another round with
- * tasklist_lock write-locked to __ptrace_unlink the
- * children that might cause this deadlock. That will
- * wake them up to process their pending SIGKILL.
- *
- * First, give everyone we just killed a chance to run
- * so they can all get into the coredump synchronization.
- * That should leave only the TASK_TRACED stragglers for
- * us to wake up. If a ptracer is still running, we'll
- * have to come around again after letting it finish.
+ * If the tracee went into a ptrace stop for exit tracing,
+ * we could deadlock since the tracer is waiting for this
+ * coredump to finish. Detach them so they can both die.
*/
- yield();
- traced = 0;
write_lock_irq(&tasklist_lock);
do_each_thread(g,p) {
- if (mm != p->mm || p == tsk ||
- !p->ptrace || p->parent->mm != mm)
- continue;
- if ((p->parent->flags & (PF_SIGNALED|PF_EXITING)) ||
- (p->parent->state & (TASK_TRACED|TASK_STOPPED))) {
- /*
- * The parent is in the process of exiting
- * itself, or else it's stopped right now.
- * It cannot be in a ptrace call, and would
- * have to read_lock tasklist_lock before
- * it could start one, so we are safe here.
- */
+ if (mm == p->mm && p != tsk &&
+ p->ptrace && p->parent->mm == mm) {
__ptrace_unlink(p);
- } else {
- /*
- * Blargh! The ptracer is not dying
- * yet, so we cannot be sure that it
- * isn't in the middle of a ptrace call.
- * We'll have to let it run to get into
- * coredump_wait and come around another
- * time to detach its tracee.
- */
- traced = 1;
}
} while_each_thread(g,p);
write_unlock_irq(&tasklist_lock);
struct inode * inode;
struct file * file;
int retval = 0;
- int fsuid = current->fsuid;
- int flag = 0;
binfmt = current->binfmt;
if (!binfmt || !binfmt->core_dump)
up_write(&mm->mmap_sem);
goto fail;
}
-
- /*
- * We cannot trust fsuid as being the "true" uid of the
- * process nor do we know its entire history. We only know it
- * was tainted so we dump it as root in mode 2.
- */
- if (mm->dumpable == 2) { /* Setuid core dump mode */
- flag = O_EXCL; /* Stop rewrite attacks */
- current->fsuid = 0; /* Dump root private */
- }
mm->dumpable = 0;
init_completion(&mm->core_done);
- current->signal->group_exit = 1;
+ spin_lock_irq(&current->sighand->siglock);
+ current->signal->flags = SIGNAL_GROUP_EXIT;
current->signal->group_exit_code = exit_code;
+ spin_unlock_irq(&current->sighand->siglock);
coredump_wait(mm);
+ /*
+ * Clear any false indication of pending signals that might
+ * be seen by the filesystem code called to write the core file.
+ */
+ current->signal->group_stop_count = 0;
+ clear_thread_flag(TIF_SIGPENDING);
+
if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
goto fail_unlock;
lock_kernel();
format_corename(corename, core_pattern, signr);
unlock_kernel();
- file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
+ file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
if (IS_ERR(file))
goto fail_unlock;
inode = file->f_dentry->d_inode;
close_fail:
filp_close(file, NULL);
fail_unlock:
- current->fsuid = fsuid;
complete_all(&mm->core_done);
fail:
return retval;