X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fexit.c;fp=kernel%2Fexit.c;h=208cbfa0987723699534ede7b6786ea111b0c53a;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=17987bc019b35a5ebe039513c70706fb426ca1ee;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/kernel/exit.c b/kernel/exit.c
index 17987bc01..208cbfa09 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
+#include
 #include
 #include
 #include
@@ -25,22 +26,15 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
-#include
 #include
 #include
-#include
-#include
-#include
-#include	/* for audit_free() */
-#include
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -50,103 +44,46 @@
 
 extern void sem_exit (void);
 extern struct task_struct *child_reaper;
 
+int getrusage(struct task_struct *, int, struct rusage __user *);
+
 static void exit_mm(struct task_struct * tsk);
 
 static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
 	detach_pid(p, PIDTYPE_PID);
+	detach_pid(p, PIDTYPE_TGID);
 	if (thread_group_leader(p)) {
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
-
-		list_del_rcu(&p->tasks);
-		__get_cpu_var(process_counts)--;
+		if (p->pid)
+			__get_cpu_var(process_counts)--;
 	}
-	list_del_rcu(&p->thread_group);
-	remove_parent(p);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-static void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *sighand;
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
-
-	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	spin_lock(&sighand->siglock);
-
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
-		posix_cpu_timers_exit_group(tsk);
-	else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads.  When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		sig = NULL; /* Marker for below. */
-	}
-
-	__unhash_process(tsk);
-
-	tsk->signal = NULL;
-	tsk->sighand = NULL;
-	spin_unlock(&sighand->siglock);
-	rcu_read_unlock();
-
-	__cleanup_sighand(sighand);
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		flush_sigqueue(&sig->shared_pending);
-		__cleanup_signal(sig);
-	}
-}
-
-static void delayed_put_task_struct(struct rcu_head *rhp)
-{
-	put_task_struct(container_of(rhp, struct task_struct, rcu));
+	REMOVE_LINKS(p);
 }
 
 void release_task(struct task_struct * p)
 {
-	struct task_struct *leader;
 	int zap_leader;
-repeat:
+	task_t *leader;
+	struct dentry *proc_dentry;
+
+repeat:
 	atomic_dec(&p->user->processes);
+	spin_lock(&p->proc_lock);
+	proc_dentry = proc_pid_unhash(p);
 	write_lock_irq(&tasklist_lock);
-	ptrace_unlink(p);
+	if (unlikely(p->ptrace))
+		__ptrace_unlink(p);
 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
 	__exit_signal(p);
+	/*
+	 * Note that the fastpath in sys_times depends on __exit_signal having
+	 * updated the counters before a task is removed from the tasklist of
+	 * the process by __unhash_process.
+	 */
+	__unhash_process(p);
 
 	/*
 	 * If we are the last non-leader member of the thread
@@ -171,15 +108,31 @@ repeat:
 
 	sched_exit(p);
 	write_unlock_irq(&tasklist_lock);
-	proc_flush_task(p);
+	spin_unlock(&p->proc_lock);
+	proc_pid_flush(proc_dentry);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct(p);
 
 	p = leader;
 	if (unlikely(zap_leader))
 		goto repeat;
 }
 
+/* we are using it only for SMP init */
+
+void unhash_process(struct task_struct *p)
+{
+	struct dentry *proc_dentry;
+
+	spin_lock(&p->proc_lock);
+	proc_dentry = proc_pid_unhash(p);
+	write_lock_irq(&tasklist_lock);
+	__unhash_process(p);
+	write_unlock_irq(&tasklist_lock);
+	spin_unlock(&p->proc_lock);
+	proc_pid_flush(proc_dentry);
+}
+
 /*
  * This checks not only the pgrp, but falls back on the pid if no
  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
@@ -214,7 +167,7 @@ out:
  *
  * "I ask you, have you ever known what it is to be an orphan?"
  */
-static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
@@ -287,10 +240,10 @@ static void reparent_to_init(void)
 
 	ptrace_unlink(current);
 	/* Reparent to init */
-	remove_parent(current);
+	REMOVE_LINKS(current);
 	current->parent = child_reaper;
 	current->real_parent = child_reaper;
-	add_parent(current);
+	SET_LINKS(current);
 
 	/* Set the exit signal to SIGCHLD so we signal init on exit */
 	current->exit_signal = SIGCHLD;
@@ -396,7 +349,9 @@ void daemonize(const char *name, ...)
 	exit_mm(current);
 
 	set_special_pids(1, 1);
-	proc_clear_tty(current);
+	down(&tty_sem);
+	current->signal->tty = NULL;
+	up(&tty_sem);
 
 	/* Block and flush all signals */
 	sigfillset(&blocked);
@@ -449,7 +404,6 @@ static void close_files(struct files_struct * files)
 			}
 			i++;
 			set >>= 1;
-			cond_resched();
 		}
 	}
 }
@@ -577,7 +531,7 @@ static void exit_mm(struct task_struct * tsk)
 		down_read(&mm->mmap_sem);
 	}
 	atomic_inc(&mm->mm_count);
-	BUG_ON(mm != tsk->active_mm);
+	if (mm != tsk->active_mm) BUG();
 	/* more a memory barrier than a real lock */
 	task_lock(tsk);
 	tsk->mm = NULL;
@@ -587,8 +541,7 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
 {
 	/* check for reaper context */
 	vxwprintk((p->xid != reaper->xid) && (reaper != child_reaper),
@@ -599,12 +552,11 @@ choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 	 * Make sure we're not reparenting to ourselves and that
 	 * the parent is not a zombie.
 	 */
-	BUG_ON(p == reaper || reaper->exit_state);
+	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
 	p->real_parent = reaper;
 }
 
-static void
-reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
+static void reparent_thread(task_t *p, task_t *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -625,9 +577,9 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 	 * anyway, so let go of it.
 	 */
 	p->ptrace = 0;
-	remove_parent(p);
+	list_del_init(&p->sibling);
 	p->parent = p->real_parent;
-	add_parent(p);
+	list_add_tail(&p->sibling, &p->parent->children);
 
 	/* If we'd notified the old parent about this child's death,
 	 * also notify the new parent.
@@ -668,8 +620,8 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static void
-forget_original_parent(struct task_struct *father, struct list_head *to_release)
+static void forget_original_parent(struct task_struct * father,
+					  struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -692,7 +644,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
 	 */
 	list_for_each_safe(_p, _n, &father->children) {
 		int ptrace;
-		p = list_entry(_p, struct task_struct, sibling);
+		p = list_entry(_p,struct task_struct,sibling);
 
 		ptrace = p->ptrace;
 
@@ -721,7 +673,8 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
 			list_add(&p->ptrace_list, to_release);
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p, struct task_struct, ptrace_list);
+		p = list_entry(_p,struct task_struct,ptrace_list);
+		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
@@ -841,7 +794,7 @@ static void exit_notify(struct task_struct *tsk)
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
 		list_del_init(_p);
-		t = list_entry(_p, struct task_struct, ptrace_list);
+		t = list_entry(_p,struct task_struct,ptrace_list);
 		release_task(t);
 	}
 
@@ -853,9 +806,7 @@ static void exit_notify(struct task_struct *tsk)
 fastcall NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
-	struct taskstats *tidstats;
 	int group_dead;
-	unsigned int mycpu;
 
 	profile_task_exit(tsk);
 
@@ -865,8 +816,10 @@ fastcall NORET_TYPE void do_exit(long code)
 		panic("Aiee, killing interrupt handler!");
 	if (unlikely(!tsk->pid))
 		panic("Attempted to kill the idle task!");
-	if (unlikely(tsk == child_reaper))
+	if (unlikely(tsk->pid == 1))
 		panic("Attempted to kill init!");
+	if (tsk->io_context)
+		exit_io_context();
 
 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
 		current->ptrace_message = code;
@@ -880,8 +833,6 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->flags & PF_EXITING)) {
 		printk(KERN_ALERT
 			"Fixing recursive fault but reboot is needed!\n");
-		if (tsk->io_context)
-			exit_io_context();
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule();
 	}
@@ -893,8 +844,6 @@ fastcall NORET_TYPE void do_exit(long code)
 				current->comm, current->pid,
 				preempt_count());
 
-	taskstats_exit_alloc(&tidstats, &mycpu);
-
 	acct_update_integrals(tsk);
 	if (tsk->mm) {
 		update_hiwater_rss(tsk->mm);
@@ -904,32 +853,10 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (group_dead) {
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
+		acct_process(code);
 	}
-
-	if (current->tux_info) {
-#ifdef CONFIG_TUX_DEBUG
-		printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
-			code, __builtin_return_address(0));
-#endif
-		current->tux_exit();
-	}
-
-	acct_collect(code, group_dead);
-	if (unlikely(tsk->robust_list))
-		exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
-	if (unlikely(tsk->compat_robust_list))
-		compat_exit_robust_list(tsk);
-#endif
-	if (unlikely(tsk->audit_context))
-		audit_free(tsk);
-	taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
-	taskstats_exit_free(tidstats);
-
 	exit_mm(tsk);
-	if (group_dead)
-		acct_process();
 	exit_sem(tsk);
 	__exit_files(tsk);
 	__exit_fs(tsk);
@@ -955,23 +882,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 	/*
-	 * This must happen late, after the PID is not
-	 * hashed anymore:
-	 */
-	if (unlikely(!list_empty(&tsk->pi_state_list)))
-		exit_pi_state_list(tsk);
-	if (unlikely(current->pi_state_cache))
-		kfree(current->pi_state_cache);
-	/*
-	 * Make sure we are holding no locks:
+	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
 	 */
-	debug_check_no_locks_held(tsk);
-
-	if (tsk->io_context)
-		exit_io_context();
-
-	if (tsk->splice_pipe)
-		__free_pipe_info(tsk->splice_pipe);
+	mutex_debug_check_no_locks_held(tsk);
 
 	/* needs to stay after exit_notify() */
 	exit_vx_info(tsk, code);
@@ -1005,6 +918,13 @@ asmlinkage long sys_exit(int error_code)
 	do_exit((error_code&0xff)<<8);
 }
 
+task_t fastcall *next_thread(const task_t *p)
+{
+	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
+}
+
+EXPORT_SYMBOL(next_thread);
+
 /*
  * Take down every thread in the group. This is called by fatal signals
  * as well as by sys_exit_group (below).
@@ -1019,6 +939,7 @@ do_group_exit(int exit_code)
 	else if (!thread_group_empty(current)) {
 		struct signal_struct *const sig = current->signal;
 		struct sighand_struct *const sighand = current->sighand;
+		read_lock(&tasklist_lock);
 		spin_lock_irq(&sighand->siglock);
 		if (sig->flags & SIGNAL_GROUP_EXIT)
 			/* Another thread got here before we took the lock. */
@@ -1028,6 +949,7 @@ do_group_exit(int exit_code)
 			zap_other_threads(current);
 		}
 		spin_unlock_irq(&sighand->siglock);
+		read_unlock(&tasklist_lock);
 	}
 
 	do_exit(exit_code);
@@ -1044,7 +966,7 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, struct task_struct *p)
+static int eligible_child(pid_t pid, int options, task_t *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1076,7 +998,7 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
 	 * Do not consider thread group leaders that are
 	 * in a non-empty thread group:
 	 */
-	if (delay_group_leader(p))
+	if (current->tgid != p->tgid && delay_group_leader(p))
 		return 2;
 
 	if (security_task_wait(p))
@@ -1085,13 +1007,12 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
 {
 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
 
-	put_task_struct(p);
 	if (!retval)
 		retval = put_user(SIGCHLD, &infop->si_signo);
@@ -1116,7 +1037,7 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_zombie(struct task_struct *p, int noreap,
+static int wait_task_zombie(task_t *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1278,8 +1199,8 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
  */
-static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
-			     int noreap, struct siginfo __user *infop,
+static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
+			     struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1358,7 +1279,7 @@ bail_ref:
 
 		/* move to end of parent's list to avoid starvation */
 		remove_parent(p);
-		add_parent(p);
+		add_parent(p, p->parent);
 
 		write_unlock_irq(&tasklist_lock);
 
@@ -1393,7 +1314,7 @@ bail_ref:
  * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
-static int wait_task_continued(struct task_struct *p, int noreap,
+static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1479,7 +1400,7 @@ repeat:
 		int ret;
 
 		list_for_each(_p,&tsk->children) {
-			p = list_entry(_p, struct task_struct, sibling);
+			p = list_entry(_p,struct task_struct,sibling);
 
 			ret = eligible_child(pid, options, p);
 			if (!ret)
@@ -1568,7 +1489,8 @@ check_continued:
 		if (options & __WNOTHREAD)
 			break;
 		tsk = next_thread(tsk);
-		BUG_ON(tsk->signal != current->signal);
+		if (tsk->signal != current->signal)
+			BUG();
 	} while (tsk != current);
 
 	read_unlock(&tasklist_lock);