#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
-#include <linux/ckrm.h>
-#include <linux/ckrm_tsk.h>
-#include <linux/vs_limit.h>
-#include <linux/ckrm_mem.h>
+#include <linux/cpuset.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/cn_proc.h>
+#include <linux/mutex.h>
#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
int getrusage(struct task_struct *, int, struct rusage __user *);
+static void exit_mm(struct task_struct * tsk);
+
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
- /* tasklist_lock is held, is this sufficient? */
- if (p->vx_info) {
- atomic_dec(&p->vx_info->cvirt.nr_threads);
- vx_nproc_dec(p);
- }
detach_pid(p, PIDTYPE_PID);
detach_pid(p, PIDTYPE_TGID);
if (thread_group_leader(p)) {
__ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
- __exit_sighand(p);
+ /*
+ * Note that the fastpath in sys_times depends on __exit_signal having
+ * updated the counters before a task is removed from the tasklist of
+ * the process by __unhash_process.
+ */
__unhash_process(p);
do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
if (p == ignored_task
- || p->exit_state >= EXIT_ZOMBIE
+ || p->exit_state
|| p->real_parent->pid == 1)
continue;
if (process_group(p->real_parent) != pgrp
return retval;
}
-static inline int has_stopped_jobs(int pgrp)
+static int has_stopped_jobs(int pgrp)
{
int retval = 0;
struct task_struct *p;
}
/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ * reparent_to_init - Reparent the calling kernel thread to the init task.
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to init so that
*
* NOTE that reparent_to_init() gives the caller full capabilities.
*/
-void reparent_to_init(void)
+static void reparent_to_init(void)
{
write_lock_irq(&tasklist_lock);
ptrace_unlink(current);
/* Reparent to init */
REMOVE_LINKS(current);
- /* FIXME handle vchild_reaper/initpid */
current->parent = child_reaper;
current->real_parent = child_reaper;
SET_LINKS(current);
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
- if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
+ if ((current->policy == SCHED_NORMAL ||
+ current->policy == SCHED_BATCH)
+ && (task_nice(current) < 0))
set_user_nice(current, 0);
/* cpus_allowed? */
/* rt_priority? */
void __set_special_pids(pid_t session, pid_t pgrp)
{
- struct task_struct *curr = current;
+ struct task_struct *curr = current->group_leader;
if (curr->signal->session != session) {
detach_pid(curr, PIDTYPE_SID);
*/
int allow_signal(int sig)
{
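+	/* valid_signal() bounds-checks sig against _NSIG; sig < 1 rejects 0 and negatives */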
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
	spin_lock_irq(&current->sighand->siglock);
int disallow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
	spin_lock_irq(&current->sighand->siglock);
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
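+	/* detach from the caller's mount namespace and share init's instead */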
+ exit_namespace(current);
+ current->namespace = init_task.namespace;
+ get_namespace(current->namespace);
exit_files(current);
current->files = init_task.files;
	atomic_inc(&current->files->count);
EXPORT_SYMBOL(daemonize);
-static inline void close_files(struct files_struct * files)
+static void close_files(struct files_struct * files)
{
int i, j;
+ struct fdtable *fdt;
j = 0;
+
+ /*
+ * It is safe to dereference the fd table without RCU or
+ * ->file_lock because this is the last reference to the
+ * files structure.
+ */
+ fdt = files_fdtable(files);
for (;;) {
unsigned long set;
i = j * __NFDBITS;
- if (i >= files->max_fdset || i >= files->max_fds)
+ if (i >= fdt->max_fdset || i >= fdt->max_fds)
break;
- set = files->open_fds->fds_bits[j++];
+ set = fdt->open_fds->fds_bits[j++];
while (set) {
if (set & 1) {
- struct file * file = xchg(&files->fd[i], NULL);
- if (file)
+ struct file * file = xchg(&fdt->fd[i], NULL);
+ if (file)
filp_close(file, files);
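+				/* vserver: uncharge the closed fd from this context's open-fd limit */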
- // vx_openfd_dec(i);
+ vx_openfd_dec(i);
}
i++;
set >>= 1;
void fastcall put_files_struct(struct files_struct *files)
{
+ struct fdtable *fdt;
+
if (atomic_dec_and_test(&files->count)) {
close_files(files);
/*
* Free the fd and fdset arrays if we expanded them.
+ * If the fdtable was embedded, pass files for freeing
+ * at the end of the RCU grace period. Otherwise,
+ * you can free files immediately.
*/
- if (files->fd != &files->fd_array[0])
- free_fd_array(files->fd, files->max_fds);
- if (files->max_fdset > __FD_SETSIZE) {
- free_fdset(files->open_fds, files->max_fdset);
- free_fdset(files->close_on_exec, files->max_fdset);
- }
- kmem_cache_free(files_cachep, files);
+ fdt = files_fdtable(files);
+ if (fdt == &files->fdtab)
+ fdt->free_files = files;
+ else
+ kmem_cache_free(files_cachep, files);
+ free_fdtable(fdt);
}
}
* Turn us into a lazy TLB process if we
* aren't already..
*/
-static inline void __exit_mm(struct task_struct * tsk)
+static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
-#ifdef CONFIG_CKRM_RES_MEM
- spin_lock(&mm->peertask_lock);
- list_del_init(&tsk->mm_peers);
- ckrm_mem_evaluate_mm(mm);
- spin_unlock(&mm->peertask_lock);
-#endif
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mmput(mm);
}
-void exit_mm(struct task_struct *tsk)
-{
-	__exit_mm(tsk);
-}
-
-static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
{
+	/* check for reaper context */
+	vxwprintk((p->xid != reaper->xid) && (reaper != child_reaper),
+		"rogue reaper: %p[%d,#%u] <> %p[%d,#%u]",
+		p, p->pid, p->xid, reaper, reaper->pid, reaper->xid);
/*
* Make sure we're not reparenting to ourselves and that
* the parent is not a zombie.
*/
- BUG_ON(p == reaper || reaper->state >= EXIT_ZOMBIE || reaper->exit_state >= EXIT_ZOMBIE);
+ BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
p->real_parent = reaper;
- if (p->parent == p->real_parent)
- BUG();
}
-static inline void reparent_thread(task_t *p, task_t *father, int traced)
+static void reparent_thread(task_t *p, task_t *father, int traced)
{
/* We don't want people slaying init. */
if (p->exit_signal != -1)
if (p->pdeath_signal)
/* We already hold the tasklist_lock here. */
- group_send_sig_info(p->pdeath_signal, (void *) 0, p);
+ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
/* Move the child from its dying parent to the new one. */
if (unlikely(traced)) {
int pgrp = process_group(p);
if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
- __kill_pg_info(SIGHUP, (void *)1, pgrp);
- __kill_pg_info(SIGCONT, (void *)1, pgrp);
+ __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+ __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
}
* group, and if no such member exists, give it to
* the global child reaper process (ie "init")
*/
-static inline void forget_original_parent(struct task_struct * father,
+static void forget_original_parent(struct task_struct * father,
struct list_head *to_release)
{
struct task_struct *p, *reaper = father;
struct list_head *_p, *_n;
- /* FIXME handle vchild_reaper/initpid */
do {
reaper = next_thread(reaper);
if (reaper == father) {
- reaper = child_reaper;
+ reaper = vx_child_reaper(father);
break;
}
- } while (reaper->exit_state >= EXIT_ZOMBIE);
+ } while (reaper->exit_state);
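+	/* any nonzero exit_state (EXIT_ZOMBIE or EXIT_DEAD) disqualifies a reaper */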
/*
* There are only two places where our children can be:
if (father == p->real_parent) {
/* reparent with a reaper, real father it's us */
- choose_new_parent(p, reaper, child_reaper);
+ choose_new_parent(p, vx_child_reaper(p));
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
p = list_entry(_p,struct task_struct,ptrace_list);
- choose_new_parent(p, reaper, child_reaper);
+
+ choose_new_parent(p, reaper);
reparent_thread(p, father, 1);
}
}
struct task_struct *t;
struct list_head ptrace_dead, *_p, *_n;
- ckrm_cb_exit(tsk);
-
- if (signal_pending(tsk) && !tsk->signal->group_exit
+ if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
&& !thread_group_empty(tsk)) {
/*
* This occurs when there was a race between our exit
(t->signal->session == tsk->signal->session) &&
will_become_orphaned_pgrp(process_group(tsk), tsk) &&
has_stopped_jobs(process_group(tsk))) {
- __kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
- __kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
+ __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
+ __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
}
/* Let father know we died
}
state = EXIT_ZOMBIE;
- if (tsk->exit_signal == -1 && tsk->ptrace == 0)
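+	/*
+	 * A self-reaping task (exit_signal == -1) can skip EXIT_ZOMBIE when
+	 * nobody will ever wait on it: either it has no ptracer, or the
+	 * tracer's own thread group is already exiting.
+	 */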
+ if (tsk->exit_signal == -1 &&
+ (likely(tsk->ptrace == 0) ||
+ unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
state = EXIT_DEAD;
tsk->exit_state = state;
- /*
- * Clear these here so that update_process_times() won't try to deliver
- * itimer, profile or rlimit signals to this task while it is in late exit.
- */
- tsk->it_virt_value = 0;
- tsk->it_prof_value = 0;
-
write_unlock_irq(&tasklist_lock);
list_for_each_safe(_p, _n, &ptrace_dead) {
/* If the process is dead, release it - nobody will wait for it */
if (state == EXIT_DEAD)
release_task(tsk);
-
- /* PF_DEAD causes final put_task_struct after we schedule. */
- preempt_disable();
- tsk->flags |= PF_DEAD;
}
fastcall NORET_TYPE void do_exit(long code)
profile_task_exit(tsk);
+ WARN_ON(atomic_read(&tsk->fs_excl));
+
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill init!");
if (tsk->io_context)
exit_io_context();
+
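+	/* let an attached debugger observe the exit before teardown begins */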
+ if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
+ current->ptrace_message = code;
+ ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
+ }
+
+ /*
+ * We're taking recursive faults here in do_exit. Safest is to just
+ * leave this task alone and wait for reboot.
+ */
+ if (unlikely(tsk->flags & PF_EXITING)) {
+ printk(KERN_ALERT
+ "Fixing recursive fault but reboot is needed!\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ }
+
tsk->flags |= PF_EXITING;
- del_timer_sync(&tsk->real_timer);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
preempt_count());
- if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
- current->ptrace_message = code;
- ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
+ acct_update_integrals(tsk);
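+	/* record the final RSS/VM high-water marks before exit_mm() drops the mm */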
+ if (tsk->mm) {
+ update_hiwater_rss(tsk->mm);
+ update_hiwater_vm(tsk->mm);
}
-
group_dead = atomic_dec_and_test(&tsk->signal->live);
- if (group_dead)
+ if (group_dead) {
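+		/* last thread in the group: stop ITIMER_REAL and free POSIX timers */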
+ hrtimer_cancel(&tsk->signal->real_timer);
+ exit_itimers(tsk->signal);
acct_process(code);
- if (current->tux_info) {
-#ifdef CONFIG_TUX_DEBUG
- printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
- code, __builtin_return_address(0));
-#endif
- current->tux_exit();
}
- __exit_mm(tsk);
+ exit_mm(tsk);
exit_sem(tsk);
__exit_files(tsk);
__exit_fs(tsk);
exit_namespace(tsk);
exit_thread();
+ cpuset_exit(tsk);
exit_keys(tsk);
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
- module_put(tsk->thread_info->exec_domain->module);
+ module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
tsk->exit_code = code;
+ proc_exit_connector(tsk);
+ /* needs to stay before exit_notify() */
+ exit_vx_info_early(tsk, code);
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+ /*
+ * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+ */
+ mutex_debug_check_no_locks_held(tsk);
+
+ /* needs to stay after exit_notify() */
+ exit_vx_info(tsk, code);
+ exit_nx_info(tsk);
+
+ /* PF_DEAD causes final put_task_struct after we schedule. */
+ preempt_disable();
+ BUG_ON(tsk->flags & PF_DEAD);
+ tsk->flags |= PF_DEAD;
- BUG_ON(!(current->flags & PF_DEAD));
schedule();
BUG();
/* Avoid "noreturn function does return". */
for (;;) ;
}
+EXPORT_SYMBOL_GPL(do_exit);
+
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
task_t fastcall *next_thread(const task_t *p)
{
-#ifdef CONFIG_SMP
- if (!p->sighand)
- BUG();
- if (!spin_is_locked(&p->sighand->siglock) &&
- !rwlock_is_locked(&tasklist_lock))
- BUG();
-#endif
return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}
{
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
- if (current->signal->group_exit)
+ if (current->signal->flags & SIGNAL_GROUP_EXIT)
exit_code = current->signal->group_exit_code;
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
read_lock(&tasklist_lock);
spin_lock_irq(&sighand->siglock);
- if (sig->group_exit)
+ if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
- sig->group_exit = 1;
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
}
if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ struct signal_struct *psig;
+ struct signal_struct *sig;
+
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
- p->parent->signal->cutime +=
- p->utime + p->signal->utime + p->signal->cutime;
- p->parent->signal->cstime +=
- p->stime + p->signal->stime + p->signal->cstime;
- p->parent->signal->cmin_flt +=
- p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
- p->parent->signal->cmaj_flt +=
- p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
- p->parent->signal->cnvcsw +=
- p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
- p->parent->signal->cnivcsw +=
- p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
+ psig = p->parent->signal;
+ sig = p->signal;
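+			/*
+			 * cputime_t is opaque, so the times are folded in with
+			 * cputime_add() rather than plain '+'.
+			 */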
+ psig->cutime =
+ cputime_add(psig->cutime,
+ cputime_add(p->utime,
+ cputime_add(sig->utime,
+ sig->cutime)));
+ psig->cstime =
+ cputime_add(psig->cstime,
+ cputime_add(p->stime,
+ cputime_add(sig->stime,
+ sig->cstime)));
+ psig->cmin_flt +=
+ p->min_flt + sig->min_flt + sig->cmin_flt;
+ psig->cmaj_flt +=
+ p->maj_flt + sig->maj_flt + sig->cmaj_flt;
+ psig->cnvcsw +=
+ p->nvcsw + sig->nvcsw + sig->cnvcsw;
+ psig->cnivcsw +=
+ p->nivcsw + sig->nivcsw + sig->cnivcsw;
spin_unlock_irq(&p->parent->sighand->siglock);
}
read_unlock(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
- status = p->signal->group_exit
+ status = (p->signal->flags & SIGNAL_GROUP_EXIT)
? p->signal->group_exit_code : p->exit_code;
if (!retval && stat_addr)
retval = put_user(status, stat_addr);
exit_code = p->exit_code;
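+		/* p->state is a bit mask; test the TASK_TRACED bit directly */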
if (unlikely(!exit_code) ||
- unlikely(p->state > TASK_STOPPED))
+ unlikely(p->state & TASK_TRACED))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
why, (exit_code << 8) | 0x7f,
* race with the EXIT_ZOMBIE case.
*/
exit_code = xchg(&p->exit_code, 0);
- if (unlikely(p->exit_state >= EXIT_ZOMBIE)) {
+ if (unlikely(p->exit_state)) {
/*
* The task resumed and then died. Let the next iteration
* catch it in EXIT_ZOMBIE. Note that exit_code might
if (unlikely(!p->signal))
return 0;
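+	/* lockless peek; the flag is re-checked under siglock below */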
- if (p->signal->stop_state >= 0)
+ if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
spin_lock_irq(&p->sighand->siglock);
- if (p->signal->stop_state >= 0) { /* Re-check with the lock held. */
+ /* Re-check with the lock held. */
+ if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
spin_unlock_irq(&p->sighand->siglock);
return 0;
}
if (!noreap)
- p->signal->stop_state = 0;
+ p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
spin_unlock_irq(&p->sighand->siglock);
pid = p->pid;
struct task_struct *tsk;
int flag, retval;
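+	/* wait_chldexit now lives in the shared signal_struct */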
-	add_wait_queue(&current->wait_chldexit,&wait);
+	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
/*
* We will set this flag if we see any child that might later
switch (p->state) {
case TASK_TRACED:
+ /*
+ * When we hit the race with PTRACE_ATTACH,
+ * we will not report this child. But the
+ * race means it has not yet been moved to
+ * our ptrace_children list, so we need to
+ * set the flag here to avoid a spurious ECHILD
+ * when the race happens with the only child.
+ */
+ flag = 1;
if (!my_ptrace_child(p))
continue;
/*FALLTHROUGH*/
flag = 1;
if (!unlikely(options & WCONTINUED))
continue;
-
retval = wait_task_continued(
p, (options & WNOWAIT),
infop, stat_addr, ru);
retval = -ECHILD;
end:
current->state = TASK_RUNNING;
-	remove_wait_queue(&current->wait_chldexit,&wait);
+	remove_wait_queue(&current->signal->wait_chldexit,&wait);
if (infop) {
if (retval > 0)
retval = 0;