* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
+#include <linux/taskstats_kern.h>
+#include <linux/delayacct.h>
+#include <linux/cpuset.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/posix-timers.h>
+#include <linux/cn_proc.h>
+#include <linux/mutex.h>
+#include <linux/futex.h>
+#include <linux/compat.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/audit.h> /* for audit_free() */
+#include <linux/resource.h>
#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <asm/uaccess.h>
extern void sem_exit (void);
extern struct task_struct *child_reaper;
-int getrusage(struct task_struct *, int, struct rusage __user *);
+static void exit_mm(struct task_struct * tsk);
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
- /* tasklist_lock is held, is this sufficient? */
- if (p->vx_info) {
- atomic_dec(&p->vx_info->cvirt.nr_threads);
- vx_nproc_dec(p);
- }
detach_pid(p, PIDTYPE_PID);
- detach_pid(p, PIDTYPE_TGID);
if (thread_group_leader(p)) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
- if (p->pid)
- __get_cpu_var(process_counts)--;
+
+ list_del_rcu(&p->tasks);
+ __get_cpu_var(process_counts)--;
}
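+ /* the tasks and thread_group lists are walked under RCU, so use the _rcu del variants */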
+ list_del_rcu(&p->thread_group);
+ remove_parent(p);
+}
+
+/*
+ * This function expects the tasklist_lock write-locked.
+ */
+static void __exit_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+ struct sighand_struct *sighand;
+
+ BUG_ON(!sig);
+ BUG_ON(!atomic_read(&sig->count));
- REMOVE_LINKS(p);
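+ /*
+  * tsk->sighand may be shared with other threads and is freed
+  * via RCU (see __cleanup_sighand), so take the RCU read lock
+  * before dereferencing it and taking its siglock.
+  */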
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+
+ posix_cpu_timers_exit(tsk);
+ if (atomic_dec_and_test(&sig->count))
+ posix_cpu_timers_exit_group(tsk);
+ else {
+ /*
+ * If there is any task waiting for the group exit
+ * then notify it:
+ */
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ wake_up_process(sig->group_exit_task);
+ sig->group_exit_task = NULL;
+ }
+ if (tsk == sig->curr_target)
+ sig->curr_target = next_thread(tsk);
+ /*
+ * Accumulate here the counters for all threads but the
+ * group leader as they die, so they can be added into
+ * the process-wide totals when those are taken.
+ * The group leader stays around as a zombie as long
+ * as there are other threads. When it gets reaped,
+ * the exit.c code will add its counts into these totals.
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime = cputime_add(sig->utime, tsk->utime);
+ sig->stime = cputime_add(sig->stime, tsk->stime);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->sched_time += tsk->sched_time;
+ sig = NULL; /* Marker for below. */
+ }
+
+ __unhash_process(tsk);
+
+ tsk->signal = NULL;
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+
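+ /*
+  * The task is unhashed and tsk->sighand is NULL, so no new
+  * signals can be queued to it; the pending queues and, for the
+  * last thread, the shared signal state can be torn down locklessly.
+  */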
+ __cleanup_sighand(sighand);
+ clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
+ flush_sigqueue(&tsk->pending);
+ if (sig) {
+ flush_sigqueue(&sig->shared_pending);
+ __cleanup_signal(sig);
+ }
+}
+
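+/*
+ * RCU readers (pid-hash and tasklist lookups) may still see the
+ * task after release_task(), so the final put_task_struct() is
+ * deferred until a grace period has elapsed.
+ */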
+static void delayed_put_task_struct(struct rcu_head *rhp)
+{
+ put_task_struct(container_of(rhp, struct task_struct, rcu));
}
void release_task(struct task_struct * p)
{
+ struct task_struct *leader;
int zap_leader;
- task_t *leader;
- struct dentry *proc_dentry;
-
-repeat:
+repeat:
atomic_dec(&p->user->processes);
- spin_lock(&p->proc_lock);
- proc_dentry = proc_pid_unhash(p);
write_lock_irq(&tasklist_lock);
- if (unlikely(p->ptrace))
- __ptrace_unlink(p);
+ ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
- __exit_sighand(p);
- __unhash_process(p);
/*
* If we are the last non-leader member of the thread
sched_exit(p);
write_unlock_irq(&tasklist_lock);
- spin_unlock(&p->proc_lock);
- proc_pid_flush(proc_dentry);
+ proc_flush_task(p);
release_thread(p);
- if (p->vx_info)
- release_vx_info(p->vx_info, p);
- if (p->nx_info)
- release_nx_info(p->nx_info, p);
- put_task_struct(p);
+ call_rcu(&p->rcu, delayed_put_task_struct);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
-/* we are using it only for SMP init */
-
-void unhash_process(struct task_struct *p)
-{
- struct dentry *proc_dentry;
-
- spin_lock(&p->proc_lock);
- proc_dentry = proc_pid_unhash(p);
- write_lock_irq(&tasklist_lock);
- __unhash_process(p);
- write_unlock_irq(&tasklist_lock);
- spin_unlock(&p->proc_lock);
- proc_pid_flush(proc_dentry);
-}
-
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
struct task_struct *p;
int ret = 1;
return retval;
}
-static inline int has_stopped_jobs(int pgrp)
+static int has_stopped_jobs(int pgrp)
{
int retval = 0;
struct task_struct *p;
}
/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ * reparent_to_init - Reparent the calling kernel thread to the init task.
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to init so that
*
* NOTE that reparent_to_init() gives the caller full capabilities.
*/
-void reparent_to_init(void)
+static void reparent_to_init(void)
{
write_lock_irq(&tasklist_lock);
ptrace_unlink(current);
/* Reparent to init */
- REMOVE_LINKS(current);
- /* FIXME handle vchild_reaper/initpid */
+ remove_parent(current);
current->parent = child_reaper;
current->real_parent = child_reaper;
- SET_LINKS(current);
+ add_parent(current);
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
- if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
+ if ((current->policy == SCHED_NORMAL ||
+ current->policy == SCHED_BATCH)
+ && (task_nice(current) < 0))
set_user_nice(current, 0);
/* cpus_allowed? */
/* rt_priority? */
void __set_special_pids(pid_t session, pid_t pgrp)
{
- struct task_struct *curr = current;
+ struct task_struct *curr = current->group_leader;
if (curr->signal->session != session) {
detach_pid(curr, PIDTYPE_SID);
*/
int allow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
int disallow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
exit_mm(current);
set_special_pids(1, 1);
- down(&tty_sem);
- current->signal->tty = NULL;
- up(&tty_sem);
+ proc_clear_tty(current);
/* Block and flush all signals */
sigfillset(&blocked);
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
+ exit_namespace(current);
+ current->namespace = init_task.namespace;
+ get_namespace(current->namespace);
exit_files(current);
current->files = init_task.files;
atomic_inc(&current->files->count);
EXPORT_SYMBOL(daemonize);
-static inline void close_files(struct files_struct * files)
+static void close_files(struct files_struct * files)
{
int i, j;
+ struct fdtable *fdt;
j = 0;
+
+ /*
+ * It is safe to dereference the fd table without RCU or
+ * ->file_lock because this is the last reference to the
+ * files structure.
+ */
+ fdt = files_fdtable(files);
for (;;) {
unsigned long set;
i = j * __NFDBITS;
- if (i >= files->max_fdset || i >= files->max_fds)
+ if (i >= fdt->max_fdset || i >= fdt->max_fds)
break;
- set = files->open_fds->fds_bits[j++];
+ set = fdt->open_fds->fds_bits[j++];
while (set) {
if (set & 1) {
- struct file * file = xchg(&files->fd[i], NULL);
+ struct file * file = xchg(&fdt->fd[i], NULL);
if (file)
filp_close(file, files);
vx_openfd_dec(i);
}
i++;
set >>= 1;
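+ /* closing a huge fd table can take a while; yield the CPU */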
+ cond_resched();
}
}
}
void fastcall put_files_struct(struct files_struct *files)
{
+ struct fdtable *fdt;
+
if (atomic_dec_and_test(&files->count)) {
close_files(files);
/*
* Free the fd and fdset arrays if we expanded them.
+ * If the fdtable was embedded, pass files for freeing
+ * at the end of the RCU grace period. Otherwise,
+ * you can free files immediately.
*/
- if (files->fd != &files->fd_array[0])
- free_fd_array(files->fd, files->max_fds);
- if (files->max_fdset > __FD_SETSIZE) {
- free_fdset(files->open_fds, files->max_fdset);
- free_fdset(files->close_on_exec, files->max_fdset);
- }
- kmem_cache_free(files_cachep, files);
+ fdt = files_fdtable(files);
+ if (fdt == &files->fdtab)
+ fdt->free_files = files;
+ else
+ kmem_cache_free(files_cachep, files);
+ free_fdtable(fdt);
}
}
* Turn us into a lazy TLB process if we
* aren't already..
*/
-void exit_mm(struct task_struct * tsk)
+static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
- if (mm != tsk->active_mm) BUG();
+ BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
mmput(mm);
}
-static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
+ /* check for reaper context */
+ vxwprintk((p->xid != reaper->xid) && (reaper != child_reaper),
+ "rogue reaper: %p[%d,#%u] <> %p[%d,#%u]",
+ p, p->pid, p->xid, reaper, reaper->pid, reaper->xid);
+
/*
* Make sure we're not reparenting to ourselves and that
* the parent is not a zombie.
*/
- BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
+ BUG_ON(p == reaper || reaper->exit_state);
p->real_parent = reaper;
}
-static inline void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
/* We don't want people slaying init. */
if (p->exit_signal != -1)
if (p->pdeath_signal)
/* We already hold the tasklist_lock here. */
- group_send_sig_info(p->pdeath_signal, (void *) 0, p);
+ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
/* Move the child from its dying parent to the new one. */
if (unlikely(traced)) {
* anyway, so let go of it.
*/
p->ptrace = 0;
- list_del_init(&p->sibling);
+ remove_parent(p);
p->parent = p->real_parent;
- list_add_tail(&p->sibling, &p->parent->children);
+ add_parent(p);
/* If we'd notified the old parent about this child's death,
* also notify the new parent.
int pgrp = process_group(p);
if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
- __kill_pg_info(SIGHUP, (void *)1, pgrp);
- __kill_pg_info(SIGCONT, (void *)1, pgrp);
+ __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+ __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
}
* group, and if no such member exists, give it to
* the global child reaper process (ie "init")
*/
-static inline void forget_original_parent(struct task_struct * father,
- struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
struct task_struct *p, *reaper = father;
struct list_head *_p, *_n;
- /* FIXME handle vchild_reaper/initpid */
do {
reaper = next_thread(reaper);
if (reaper == father) {
- reaper = child_reaper;
+ reaper = vx_child_reaper(father);
break;
}
} while (reaper->exit_state);
*/
list_for_each_safe(_p, _n, &father->children) {
int ptrace;
- p = list_entry(_p,struct task_struct,sibling);
+ p = list_entry(_p, struct task_struct, sibling);
ptrace = p->ptrace;
if (father == p->real_parent) {
/* reparent with a reaper, real father it's us */
- choose_new_parent(p, reaper, child_reaper);
+ choose_new_parent(p, vx_child_reaper(p));
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
list_add(&p->ptrace_list, to_release);
}
list_for_each_safe(_p, _n, &father->ptrace_children) {
- p = list_entry(_p,struct task_struct,ptrace_list);
- choose_new_parent(p, reaper, child_reaper);
+ p = list_entry(_p, struct task_struct, ptrace_list);
+ choose_new_parent(p, reaper);
reparent_thread(p, father, 1);
}
}
(t->signal->session == tsk->signal->session) &&
will_become_orphaned_pgrp(process_group(tsk), tsk) &&
has_stopped_jobs(process_group(tsk))) {
- __kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
- __kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
+ __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
+ __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
}
/* Let father know we died
state = EXIT_DEAD;
tsk->exit_state = state;
- /*
- * Clear these here so that update_process_times() won't try to deliver
- * itimer, profile or rlimit signals to this task while it is in late exit.
- */
- tsk->it_virt_value = cputime_zero;
- tsk->it_prof_value = cputime_zero;
-
write_unlock_irq(&tasklist_lock);
list_for_each_safe(_p, _n, &ptrace_dead) {
list_del_init(_p);
- t = list_entry(_p,struct task_struct,ptrace_list);
+ t = list_entry(_p, struct task_struct, ptrace_list);
release_task(t);
}
/* If the process is dead, release it - nobody will wait for it */
if (state == EXIT_DEAD)
release_task(tsk);
-
- /* PF_DEAD causes final put_task_struct after we schedule. */
- preempt_disable();
- tsk->flags |= PF_DEAD;
}
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
+ struct taskstats *tidstats;
int group_dead;
+ unsigned int mycpu;
profile_task_exit(tsk);
+ WARN_ON(atomic_read(&tsk->fs_excl));
+
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk->pid == 1))
+ if (unlikely(tsk == child_reaper))
panic("Attempted to kill init!");
- if (tsk->io_context)
- exit_io_context();
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
}
+ /*
+ * We're taking recursive faults here in do_exit. Safest is to just
+ * leave this task alone and wait for reboot.
+ */
+ if (unlikely(tsk->flags & PF_EXITING)) {
+ printk(KERN_ALERT
+ "Fixing recursive fault but reboot is needed!\n");
+ if (tsk->io_context)
+ exit_io_context();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ }
+
tsk->flags |= PF_EXITING;
- del_timer_sync(&tsk->real_timer);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
preempt_count());
- acct_update_integrals();
- update_mem_hiwater();
+ taskstats_exit_alloc(&tidstats, &mycpu);
+
+ acct_update_integrals(tsk);
+ if (tsk->mm) {
+ update_hiwater_rss(tsk->mm);
+ update_hiwater_vm(tsk->mm);
+ }
group_dead = atomic_dec_and_test(&tsk->signal->live);
- if (group_dead)
- acct_process(code);
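+ /*
+  * The ITIMER_REAL hrtimer and the POSIX timers live in the
+  * shared signal_struct; stop them once the last thread exits.
+  */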
+ if (group_dead) {
+ hrtimer_cancel(&tsk->signal->real_timer);
+ exit_itimers(tsk->signal);
+ }
+
+ if (current->tux_info) {
+#ifdef CONFIG_TUX_DEBUG
+ printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
+ code, __builtin_return_address(0));
+#endif
+ current->tux_exit();
+ }
+
+ acct_collect(code, group_dead);
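+ /*
+  * Walk the userspace robust futex list(s) so futexes held by
+  * this task are marked OWNER_DIED and their waiters are woken.
+  */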
+ if (unlikely(tsk->robust_list))
+ exit_robust_list(tsk);
+#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
+ if (unlikely(tsk->compat_robust_list))
+ compat_exit_robust_list(tsk);
+#endif
+ if (unlikely(tsk->audit_context))
+ audit_free(tsk);
+ taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
+ taskstats_exit_free(tidstats);
+
exit_mm(tsk);
+ if (group_dead)
+ acct_process();
exit_sem(tsk);
__exit_files(tsk);
__exit_fs(tsk);
exit_namespace(tsk);
exit_thread();
+ cpuset_exit(tsk);
exit_keys(tsk);
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
- module_put(tsk->thread_info->exec_domain->module);
+ module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
tsk->exit_code = code;
+ proc_exit_connector(tsk);
+ /* needs to stay before exit_notify() */
+ exit_vx_info_early(tsk, code);
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+ /*
+ * This must happen late, after the PID is not
+ * hashed anymore:
+ */
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
+ exit_pi_state_list(tsk);
+ if (unlikely(current->pi_state_cache))
+ kfree(current->pi_state_cache);
+ /*
+ * Make sure we are holding no locks:
+ */
+ debug_check_no_locks_held(tsk);
+
+ if (tsk->io_context)
+ exit_io_context();
+
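+ /* free the pipe buffer cached by splice(), if any */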
+ if (tsk->splice_pipe)
+ __free_pipe_info(tsk->splice_pipe);
+
+ /* needs to stay after exit_notify() */
+ exit_vx_info(tsk, code);
+ exit_nx_info(tsk);
+
+ /* PF_DEAD causes final put_task_struct after we schedule. */
+ preempt_disable();
+ BUG_ON(tsk->flags & PF_DEAD);
+ tsk->flags |= PF_DEAD;
- BUG_ON(!(current->flags & PF_DEAD));
schedule();
BUG();
/* Avoid "noreturn function does return". */
for (;;) ;
}
+EXPORT_SYMBOL_GPL(do_exit);
+
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
do_exit((error_code&0xff)<<8);
}
-task_t fastcall *next_thread(const task_t *p)
-{
- return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
-}
-
-EXPORT_SYMBOL(next_thread);
-
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
- read_lock(&tasklist_lock);
spin_lock_irq(&sighand->siglock);
if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
- sig->flags = SIGNAL_GROUP_EXIT;
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
- read_unlock(&tasklist_lock);
}
do_exit(exit_code);
do_group_exit((error_code & 0xff) << 8);
}
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
if (pid > 0) {
if (p->pid != pid)
* Do not consider thread group leaders that are
* in a non-empty thread group:
*/
- if (current->tgid != p->tgid && delay_group_leader(p))
+ if (delay_group_leader(p))
return 2;
if (security_task_wait(p))
return 1;
}
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
int why, int status,
struct siginfo __user *infop,
struct rusage __user *rusagep)
{
int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
+
put_task_struct(p);
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
}
if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ struct signal_struct *psig;
+ struct signal_struct *sig;
+
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
- p->parent->signal->cutime =
- cputime_add(p->parent->signal->cutime,
+ psig = p->parent->signal;
+ sig = p->signal;
+ psig->cutime =
+ cputime_add(psig->cutime,
cputime_add(p->utime,
- cputime_add(p->signal->utime,
- p->signal->cutime)));
- p->parent->signal->cstime =
- cputime_add(p->parent->signal->cstime,
+ cputime_add(sig->utime,
+ sig->cutime)));
+ psig->cstime =
+ cputime_add(psig->cstime,
cputime_add(p->stime,
- cputime_add(p->signal->stime,
- p->signal->cstime)));
- p->parent->signal->cmin_flt +=
- p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
- p->parent->signal->cmaj_flt +=
- p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
- p->parent->signal->cnvcsw +=
- p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
- p->parent->signal->cnivcsw +=
- p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
+ cputime_add(sig->stime,
+ sig->cstime)));
+ psig->cmin_flt +=
+ p->min_flt + sig->min_flt + sig->cmin_flt;
+ psig->cmaj_flt +=
+ p->maj_flt + sig->maj_flt + sig->cmaj_flt;
+ psig->cnvcsw +=
+ p->nvcsw + sig->nvcsw + sig->cnvcsw;
+ psig->cnivcsw +=
+ p->nivcsw + sig->nivcsw + sig->cnivcsw;
spin_unlock_irq(&p->parent->sighand->siglock);
}
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
- struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+ int noreap, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
exit_code = p->exit_code;
if (unlikely(!exit_code) ||
- unlikely(p->state > TASK_STOPPED))
+ unlikely(p->state & TASK_TRACED))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
why, (exit_code << 8) | 0x7f,
/* move to end of parent's list to avoid starvation */
remove_parent(p);
- add_parent(p, p->parent);
+ add_parent(p);
write_unlock_irq(&tasklist_lock);
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int ret;
list_for_each(_p,&tsk->children) {
- p = list_entry(_p,struct task_struct,sibling);
+ p = list_entry(_p, struct task_struct, sibling);
ret = eligible_child(pid, options, p);
if (!ret)
switch (p->state) {
case TASK_TRACED:
+ /*
+ * When we hit the race with PTRACE_ATTACH,
+ * we will not report this child. But the
+ * race means it has not yet been moved to
+ * our ptrace_children list, so we need to
+ * set the flag here to avoid a spurious ECHILD
+ * when the race happens with the only child.
+ */
+ flag = 1;
if (!my_ptrace_child(p))
continue;
/*FALLTHROUGH*/
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
- if (tsk->signal != current->signal)
- BUG();
+ BUG_ON(tsk->signal != current->signal);
} while (tsk != current);
read_unlock(&tasklist_lock);