X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fsys.c;fp=kernel%2Fsys.c;h=56dffa5a97165bfe232f443d66d087952294b48c;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=6195e5fe95ee0f500ebf143f2ec839bdad65f882;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git diff --git a/kernel/sys.c b/kernel/sys.c index 6195e5fe9..56dffa5a9 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include #include #include #include @@ -13,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -57,12 +59,6 @@ #ifndef GET_FPEXC_CTL # define GET_FPEXC_CTL(a,b) (-EINVAL) #endif -#ifndef GET_ENDIAN -# define GET_ENDIAN(a,b) (-EINVAL) -#endif -#ifndef SET_ENDIAN -# define SET_ENDIAN(a,b) (-EINVAL) -#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -101,305 +97,99 @@ int cad_pid = 1; * and the like. */ -static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); - -/* - * Notifier chain core routines. The exported routines below - * are layered on top of these, with appropriate locking added. - */ - -static int notifier_chain_register(struct notifier_block **nl, - struct notifier_block *n) -{ - while ((*nl) != NULL) { - if (n->priority > (*nl)->priority) - break; - nl = &((*nl)->next); - } - n->next = *nl; - rcu_assign_pointer(*nl, n); - return 0; -} - -static int notifier_chain_unregister(struct notifier_block **nl, - struct notifier_block *n) -{ - while ((*nl) != NULL) { - if ((*nl) == n) { - rcu_assign_pointer(*nl, n->next); - return 0; - } - nl = &((*nl)->next); - } - return -ENOENT; -} - -static int __kprobes notifier_call_chain(struct notifier_block **nl, - unsigned long val, void *v) -{ - int ret = NOTIFY_DONE; - struct notifier_block *nb, *next_nb; - - nb = rcu_dereference(*nl); - while (nb) { - next_nb = rcu_dereference(nb->next); - ret = nb->notifier_call(nb, val, v); - if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) - break; - nb = next_nb; - } - return ret; -} - -/* - * Atomic notifier chain routines. Registration and unregistration - * use a mutex, and call_chain is synchronized by RCU (no locks). - */ +static struct notifier_block *reboot_notifier_list; +static DEFINE_RWLOCK(notifier_lock); /** - * atomic_notifier_chain_register - Add notifier to an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain + * notifier_chain_register - Add notifier to a notifier chain + * @list: Pointer to root list pointer * @n: New entry in notifier chain * - * Adds a notifier to an atomic notifier chain. + * Adds a notifier to a notifier chain. * * Currently always returns zero. */ - -int atomic_notifier_chain_register(struct atomic_notifier_head *nh, - struct notifier_block *n) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&nh->lock, flags); - ret = notifier_chain_register(&nh->head, n); - spin_unlock_irqrestore(&nh->lock, flags); - return ret; -} - -EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); - -/** - * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from an atomic notifier chain. - * - * Returns zero on success or %-ENOENT on failure. 
- */ -int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, - struct notifier_block *n) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&nh->lock, flags); - ret = notifier_chain_unregister(&nh->head, n); - spin_unlock_irqrestore(&nh->lock, flags); - synchronize_rcu(); - return ret; -} - -EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); - -/** - * atomic_notifier_call_chain - Call functions in an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * - * Calls each function in a notifier chain in turn. The functions - * run in an atomic context, so they must not block. - * This routine uses RCU to synchronize with changes to the chain. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. - */ -int atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v) +int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) { - int ret; - - rcu_read_lock(); - ret = notifier_call_chain(&nh->head, val, v); - rcu_read_unlock(); - return ret; + write_lock(¬ifier_lock); + while(*list) + { + if(n->priority > (*list)->priority) + break; + list= &((*list)->next); + } + n->next = *list; + *list=n; + write_unlock(¬ifier_lock); + return 0; } -EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); - -/* - * Blocking notifier chain routines. All access to the chain is - * synchronized by an rwsem. - */ +EXPORT_SYMBOL(notifier_chain_register); /** - * blocking_notifier_chain_register - Add notifier to a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain + * notifier_chain_unregister - Remove notifier from a notifier chain + * @nl: Pointer to root list pointer * @n: New entry in notifier chain * - * Adds a notifier to a blocking notifier chain. - * Must be called in process context. + * Removes a notifier from a notifier chain. * - * Currently always returns zero. + * Returns zero on success, or %-ENOENT on failure. */ -int blocking_notifier_chain_register(struct blocking_notifier_head *nh, - struct notifier_block *n) -{ - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call down_write(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_register(&nh->head, n); - - down_write(&nh->rwsem); - ret = notifier_chain_register(&nh->head, n); - up_write(&nh->rwsem); - return ret; -} - -EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); - -/** - * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from a blocking notifier chain. - * Must be called from process context. - * - * Returns zero on success or %-ENOENT on failure. 
- */ -int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, - struct notifier_block *n) +int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) { - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call down_write(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_unregister(&nh->head, n); - - down_write(&nh->rwsem); - ret = notifier_chain_unregister(&nh->head, n); - up_write(&nh->rwsem); - return ret; + write_lock(¬ifier_lock); + while((*nl)!=NULL) + { + if((*nl)==n) + { + *nl=n->next; + write_unlock(¬ifier_lock); + return 0; + } + nl=&((*nl)->next); + } + write_unlock(¬ifier_lock); + return -ENOENT; } -EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); +EXPORT_SYMBOL(notifier_chain_unregister); /** - * blocking_notifier_call_chain - Call functions in a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain + * notifier_call_chain - Call functions in a notifier chain + * @n: Pointer to root pointer of notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * - * Calls each function in a notifier chain in turn. The functions - * run in a process context, so they are allowed to block. + * Calls each function in a notifier chain in turn. * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain + * If the return value of the notifier can be and'd + * with %NOTIFY_STOP_MASK, then notifier_call_chain * will return immediately, with the return value of * the notifier function which halted execution. - * Otherwise the return value is the return value + * Otherwise, the return value is the return value * of the last notifier function called. */ -int blocking_notifier_call_chain(struct blocking_notifier_head *nh, - unsigned long val, void *v) +int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) { - int ret; + int ret=NOTIFY_DONE; + struct notifier_block *nb = *n; - down_read(&nh->rwsem); - ret = notifier_call_chain(&nh->head, val, v); - up_read(&nh->rwsem); + while(nb) + { + ret=nb->notifier_call(nb,val,v); + if(ret&NOTIFY_STOP_MASK) + { + return ret; + } + nb=nb->next; + } return ret; } -EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); - -/* - * Raw notifier chain routines. There is no protection; - * the caller must provide it. Use at your own risk! - */ - -/** - * raw_notifier_chain_register - Add notifier to a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @n: New entry in notifier chain - * - * Adds a notifier to a raw notifier chain. - * All locking must be provided by the caller. - * - * Currently always returns zero. - */ - -int raw_notifier_chain_register(struct raw_notifier_head *nh, - struct notifier_block *n) -{ - return notifier_chain_register(&nh->head, n); -} - -EXPORT_SYMBOL_GPL(raw_notifier_chain_register); - -/** - * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from a raw notifier chain. - * All locking must be provided by the caller. - * - * Returns zero on success or %-ENOENT on failure. 
- */ -int raw_notifier_chain_unregister(struct raw_notifier_head *nh, - struct notifier_block *n) -{ - return notifier_chain_unregister(&nh->head, n); -} - -EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); - -/** - * raw_notifier_call_chain - Call functions in a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * - * Calls each function in a notifier chain in turn. The functions - * run in an undefined context. - * All locking must be provided by the caller. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then raw_notifier_call_chain - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. - */ - -int raw_notifier_call_chain(struct raw_notifier_head *nh, - unsigned long val, void *v) -{ - return notifier_call_chain(&nh->head, val, v); -} - -EXPORT_SYMBOL_GPL(raw_notifier_call_chain); +EXPORT_SYMBOL(notifier_call_chain); /** * register_reboot_notifier - Register function to be called at reboot time @@ -408,13 +198,13 @@ EXPORT_SYMBOL_GPL(raw_notifier_call_chain); * Registers a function with the list of functions * to be called at reboot time. * - * Currently always returns zero, as blocking_notifier_chain_register + * Currently always returns zero, as notifier_chain_register * always returns zero. */ int register_reboot_notifier(struct notifier_block * nb) { - return blocking_notifier_chain_register(&reboot_notifier_list, nb); + return notifier_chain_register(&reboot_notifier_list, nb); } EXPORT_SYMBOL(register_reboot_notifier); @@ -431,11 +221,25 @@ EXPORT_SYMBOL(register_reboot_notifier); int unregister_reboot_notifier(struct notifier_block * nb) { - return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); + return notifier_chain_unregister(&reboot_notifier_list, nb); } EXPORT_SYMBOL(unregister_reboot_notifier); +#ifndef CONFIG_SECURITY +int capable(int cap) +{ + if (vx_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap)) + return 0; + if (cap_raised(current->cap_effective, cap)) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; +} +EXPORT_SYMBOL(capable); +#endif + static int set_one_prio(struct task_struct *p, int niceval, int error) { int no_nice; @@ -595,9 +399,9 @@ void emergency_restart(void) } EXPORT_SYMBOL_GPL(emergency_restart); -static void kernel_restart_prepare(char *cmd) +void kernel_restart_prepare(char *cmd) { - blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); + notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); } @@ -629,7 +433,7 @@ EXPORT_SYMBOL_GPL(kernel_restart); * Move into place and start executing a preloaded standalone * executable. If nothing was preloaded return an error. 
*/ -static void kernel_kexec(void) +void kernel_kexec(void) { #ifdef CONFIG_KEXEC struct kimage *image; @@ -643,10 +447,11 @@ static void kernel_kexec(void) machine_kexec(image); #endif } +EXPORT_SYMBOL_GPL(kernel_kexec); void kernel_shutdown_prepare(enum system_states state) { - blocking_notifier_call_chain(&reboot_notifier_list, + notifier_call_chain(&reboot_notifier_list, (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); system_state = state; device_shutdown(); @@ -1219,24 +1024,69 @@ asmlinkage long sys_times(struct tms __user * tbuf) */ if (tbuf) { struct tms tmp; - struct task_struct *tsk = current; - struct task_struct *t; cputime_t utime, stime, cutime, cstime; - spin_lock_irq(&tsk->sighand->siglock); - utime = tsk->signal->utime; - stime = tsk->signal->stime; - t = tsk; - do { - utime = cputime_add(utime, t->utime); - stime = cputime_add(stime, t->stime); - t = next_thread(t); - } while (t != tsk); +#ifdef CONFIG_SMP + if (thread_group_empty(current)) { + /* + * Single thread case without the use of any locks. + * + * We may race with release_task if two threads are + * executing. However, release task first adds up the + * counters (__exit_signal) before removing the task + * from the process tasklist (__unhash_process). + * __exit_signal also acquires and releases the + * siglock which results in the proper memory ordering + * so that the list modifications are always visible + * after the counters have been updated. + * + * If the counters have been updated by the second thread + * but the thread has not yet been removed from the list + * then the other branch will be executing which will + * block on tasklist_lock until the exit handling of the + * other task is finished. + * + * This also implies that the sighand->siglock cannot + * be held by another processor. So we can also + * skip acquiring that lock. + */ + utime = cputime_add(current->signal->utime, current->utime); + stime = cputime_add(current->signal->utime, current->stime); + cutime = current->signal->cutime; + cstime = current->signal->cstime; + } else +#endif + { + + /* Process with multiple threads */ + struct task_struct *tsk = current; + struct task_struct *t; - cutime = tsk->signal->cutime; - cstime = tsk->signal->cstime; - spin_unlock_irq(&tsk->sighand->siglock); + read_lock(&tasklist_lock); + utime = tsk->signal->utime; + stime = tsk->signal->stime; + t = tsk; + do { + utime = cputime_add(utime, t->utime); + stime = cputime_add(stime, t->stime); + t = next_thread(t); + } while (t != tsk); + /* + * While we have tasklist_lock read-locked, no dying thread + * can be updating current->signal->[us]time. Instead, + * we got their counts included in the live thread loop. + * However, another thread can come in right now and + * do a wait call that updates current->signal->c[us]time. + * To make sure we always see that pair updated atomically, + * we take the siglock around fetching them. 
+ */ + spin_lock_irq(&tsk->sighand->siglock); + cutime = tsk->signal->cutime; + cstime = tsk->signal->cstime; + spin_unlock_irq(&tsk->sighand->siglock); + read_unlock(&tasklist_lock); + } tmp.tms_utime = cputime_to_clock_t(utime); tmp.tms_stime = cputime_to_clock_t(stime); tmp.tms_cutime = cputime_to_clock_t(cutime); @@ -1392,37 +1242,24 @@ asmlinkage long sys_getsid(pid_t pid) asmlinkage long sys_setsid(void) { struct task_struct *group_leader = current->group_leader; - pid_t session; + struct pid *pid; int err = -EPERM; + down(&tty_sem); write_lock_irq(&tasklist_lock); - /* Fail if I am already a session leader */ - if (group_leader->signal->leader) - goto out; - - session = group_leader->pid; - /* Fail if a process group id already exists that equals the - * proposed session id. - * - * Don't check if session id == 1 because kernel threads use this - * session id and so the check will always fail and make it so - * init cannot successfully call setsid. - */ - if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session)) + pid = find_pid(PIDTYPE_PGID, group_leader->pid); + if (pid) goto out; group_leader->signal->leader = 1; - __set_special_pids(session, session); - - spin_lock(&group_leader->sighand->siglock); + __set_special_pids(group_leader->pid, group_leader->pid); group_leader->signal->tty = NULL; group_leader->signal->tty_old_pgrp = 0; - spin_unlock(&group_leader->sighand->siglock); - err = process_group(group_leader); out: write_unlock_irq(&tasklist_lock); + up(&tty_sem); return err; } @@ -1556,7 +1393,7 @@ static void groups_sort(struct group_info *group_info) /* a simple bsearch */ int groups_search(struct group_info *group_info, gid_t grp) { - unsigned int left, right; + int left, right; if (!group_info) return 0; @@ -1564,7 +1401,7 @@ int groups_search(struct group_info *group_info, gid_t grp) left = 0; right = group_info->ngroups; while (left < right) { - unsigned int mid = (left+right)/2; + int mid = (left+right)/2; int cmp = grp - GROUP_AT(group_info, mid); if (cmp > 0) left = mid + 1; @@ -1614,6 +1451,7 @@ asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) return -EINVAL; /* no need to grab task_lock here; it cannot change */ + get_group_info(current->group_info); i = current->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { @@ -1626,6 +1464,7 @@ asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) } } out: + put_group_info(current->group_info); return i; } @@ -1666,7 +1505,9 @@ int in_group_p(gid_t grp) { int retval = 1; if (grp != current->fsgid) { + get_group_info(current->group_info); retval = groups_search(current->group_info, grp); + put_group_info(current->group_info); } return retval; } @@ -1677,7 +1518,9 @@ int in_egroup_p(gid_t grp) { int retval = 1; if (grp != current->egid) { + get_group_info(current->group_info); retval = groups_search(current->group_info, grp); + put_group_info(current->group_info); } return retval; } @@ -1811,21 +1654,20 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; - unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; - if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) + if(copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; - if (new_rlim.rlim_cur > new_rlim.rlim_max) - return -EINVAL; + if (new_rlim.rlim_cur > new_rlim.rlim_max) + return -EINVAL; old_rlim = current->signal->rlim + 
resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT)) return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) - return -EPERM; + return -EPERM; retval = security_task_setrlimit(resource, &new_rlim); if (retval) @@ -1835,20 +1677,10 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) *old_rlim = new_rlim; task_unlock(current->group_leader); - if (resource != RLIMIT_CPU) - goto out; - - /* - * RLIMIT_CPU handling. Note that the kernel fails to return an error - * code if it rejected the user's attempt to set RLIMIT_CPU. This is a - * very long-standing error, and fixing it now risks breakage of - * applications, so we live with it - */ - if (new_rlim.rlim_cur == RLIM_INFINITY) - goto out; - - it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); - if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { + if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY && + (cputime_eq(current->signal->it_prof_expires, cputime_zero) || + new_rlim.rlim_cur <= cputime_to_secs( + current->signal->it_prof_expires))) { unsigned long rlim_cur = new_rlim.rlim_cur; cputime_t cputime; @@ -1864,11 +1696,12 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) cputime = secs_to_cputime(rlim_cur); read_lock(&tasklist_lock); spin_lock_irq(¤t->sighand->siglock); - set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); + set_process_cpu_timer(current, CPUCLOCK_PROF, + &cputime, NULL); spin_unlock_irq(¤t->sighand->siglock); read_unlock(&tasklist_lock); } -out: + return 0; } @@ -1880,6 +1713,9 @@ out: * a lot simpler! (Which we're not doing right now because we're not * measuring them yet). * + * This expects to be called with tasklist_lock read-locked or better, + * and the siglock not locked. It may momentarily take the siglock. + * * When sampling multiple threads for RUSAGE_SELF, under SMP we might have * races with threads incrementing their own counters. But since word * reads are atomic, we either get new values or old values and we don't @@ -1887,22 +1723,6 @@ out: * the c* fields from p->signal from races with exit.c updating those * fields when reaping, so a sample either gets all the additions of a * given child after it's reaped, or none so this sample is before reaping. - * - * Locking: - * We need to take the siglock for CHILDEREN, SELF and BOTH - * for the cases current multithreaded, non-current single threaded - * non-current multithreaded. Thread traversal is now safe with - * the siglock held. - * Strictly speaking, we donot need to take the siglock if we are current and - * single threaded, as no one else can take our signal_struct away, no one - * else can reap the children to update signal->c* counters, and no one else - * can race with the signal-> fields. If we do not take any lock, the - * signal-> fields could be read out of order while another thread was just - * exiting. So we should place a read memory barrier when we avoid the lock. - * On the writer side, write memory barrier is implied in __exit_signal - * as __exit_signal releases the siglock spinlock after updating the signal-> - * fields. But we don't do this yet to keep things simple. 
- * */ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) @@ -1912,23 +1732,23 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) cputime_t utime, stime; memset((char *) r, 0, sizeof *r); - utime = stime = cputime_zero; - rcu_read_lock(); - if (!lock_task_sighand(p, &flags)) { - rcu_read_unlock(); + if (unlikely(!p->signal)) return; - } + + utime = stime = cputime_zero; switch (who) { case RUSAGE_BOTH: case RUSAGE_CHILDREN: + spin_lock_irqsave(&p->sighand->siglock, flags); utime = p->signal->cutime; stime = p->signal->cstime; r->ru_nvcsw = p->signal->cnvcsw; r->ru_nivcsw = p->signal->cnivcsw; r->ru_minflt = p->signal->cmin_flt; r->ru_majflt = p->signal->cmaj_flt; + spin_unlock_irqrestore(&p->sighand->siglock, flags); if (who == RUSAGE_CHILDREN) break; @@ -1956,9 +1776,6 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) BUG(); } - unlock_task_sighand(p, &flags); - rcu_read_unlock(); - cputime_to_timeval(utime, &r->ru_utime); cputime_to_timeval(stime, &r->ru_stime); } @@ -1966,7 +1783,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) int getrusage(struct task_struct *p, int who, struct rusage __user *ru) { struct rusage r; + read_lock(&tasklist_lock); k_getrusage(p, who, &r); + read_unlock(&tasklist_lock); return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } @@ -2073,13 +1892,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, return -EFAULT; return 0; } - case PR_GET_ENDIAN: - error = GET_ENDIAN(current, arg2); - break; - case PR_SET_ENDIAN: - error = SET_ENDIAN(current, arg2); - break; - default: error = -EINVAL; break;
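
Usage sketch (not part of the diff above): the "+" side of this diff restores the old single-type notifier API, which drivers normally reach through register_reboot_notifier()/unregister_reboot_notifier(), thin wrappers around notifier_chain_register()/notifier_chain_unregister() shown earlier. The module below is a hypothetical, minimal illustration — the names example_reboot_event and example_reboot_nb are invented for this sketch and do not appear in this tree.

/*
 * Hypothetical module: registers a callback on the reboot notifier
 * chain declared in kernel/sys.c above.  Names are illustrative only.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_reboot_event(struct notifier_block *self,
				unsigned long event, void *ptr)
{
	/*
	 * event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF, as passed by
	 * kernel_restart_prepare()/kernel_shutdown_prepare() above.
	 */
	printk(KERN_INFO "example: reboot notifier, event=%lu\n", event);
	return NOTIFY_DONE;		/* let the rest of the chain run */
}

static struct notifier_block example_reboot_nb = {
	.notifier_call	= example_reboot_event,
	.priority	= 0,		/* chain is kept sorted by priority */
};

static int __init example_init(void)
{
	/* wraps notifier_chain_register(&reboot_notifier_list, ...) */
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because notifier_chain_register() walks the list comparing ->priority on insertion, higher-priority blocks sit nearer the head and are called first when notifier_call_chain() fires the chain at shutdown; a callback can stop further traversal by returning a value with NOTIFY_STOP_MASK set.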