* and the like.
*/
-static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
-
-/*
- * Notifier chain core routines. The exported routines below
- * are layered on top of these, with appropriate locking added.
- */
-
-static int notifier_chain_register(struct notifier_block **nl,
- struct notifier_block *n)
-{
- while ((*nl) != NULL) {
- if (n->priority > (*nl)->priority)
- break;
- nl = &((*nl)->next);
- }
- n->next = *nl;
- rcu_assign_pointer(*nl, n);
- return 0;
-}
-
-static int notifier_chain_unregister(struct notifier_block **nl,
- struct notifier_block *n)
-{
- while ((*nl) != NULL) {
- if ((*nl) == n) {
- rcu_assign_pointer(*nl, n->next);
- return 0;
- }
- nl = &((*nl)->next);
- }
- return -ENOENT;
-}
-
-static int __kprobes notifier_call_chain(struct notifier_block **nl,
- unsigned long val, void *v)
-{
- int ret = NOTIFY_DONE;
- struct notifier_block *nb;
-
- nb = rcu_dereference(*nl);
- while (nb) {
- ret = nb->notifier_call(nb, val, v);
- if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
- break;
- nb = rcu_dereference(nb->next);
- }
- return ret;
-}
-
-/*
- * Atomic notifier chain routines. Registration and unregistration
- * use a mutex, and call_chain is synchronized by RCU (no locks).
- */
+static struct notifier_block *reboot_notifier_list;
+static DEFINE_RWLOCK(notifier_lock);
/**
- * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
+ * notifier_chain_register - Add notifier to a notifier chain
+ * @list: Pointer to root list pointer
* @n: New entry in notifier chain
*
- * Adds a notifier to an atomic notifier chain.
+ * Adds a notifier to a notifier chain.
*
* Currently always returns zero.
*/
-
-int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
- struct notifier_block *n)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&nh->lock, flags);
- ret = notifier_chain_register(&nh->head, n);
- spin_unlock_irqrestore(&nh->lock, flags);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
-
-/**
- * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from an atomic notifier chain.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
- struct notifier_block *n)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&nh->lock, flags);
- ret = notifier_chain_unregister(&nh->head, n);
- spin_unlock_irqrestore(&nh->lock, flags);
- synchronize_rcu();
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
-
-/**
- * atomic_notifier_call_chain - Call functions in an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in an atomic context, so they must not block.
- * This routine uses RCU to synchronize with changes to the chain.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v)
+int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
- int ret;
-
- rcu_read_lock();
- ret = notifier_call_chain(&nh->head, val, v);
- rcu_read_unlock();
- return ret;
+	write_lock(&notifier_lock);
+	while (*list) {
+		if (n->priority > (*list)->priority)
+			break;
+		list = &((*list)->next);
+	}
+	n->next = *list;
+	*list = n;
+	write_unlock(&notifier_lock);
+	return 0;
}
-EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
-
-/*
- * Blocking notifier chain routines. All access to the chain is
- * synchronized by an rwsem.
- */
+EXPORT_SYMBOL(notifier_chain_register);
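/*
 * Illustrative sketch (editor's example, not part of this patch): a caller
 * embeds a struct notifier_block, fills in the callback and an optional
 * priority, and hands it to notifier_chain_register(). Higher-priority
 * entries are placed, and therefore called, first. All names below are
 * hypothetical.
 */
static int example_event(struct notifier_block *self,
			 unsigned long event, void *data)
{
	/* inspect event/data here */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 10,	/* sorts ahead of priority-0 entries */
};

static struct notifier_block *example_chain;

static void example_setup(void)
{
	notifier_chain_register(&example_chain, &example_nb);
}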
/**
- * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
+ * notifier_chain_unregister - Remove notifier from a notifier chain
+ * @nl: Pointer to root list pointer
- * @n: New entry in notifier chain
+ * @n: Entry to remove from notifier chain
*
- * Adds a notifier to a blocking notifier chain.
- * Must be called in process context.
+ * Removes a notifier from a notifier chain.
*
- * Currently always returns zero.
+ * Returns zero on success, or %-ENOENT on failure.
*/
-int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
- struct notifier_block *n)
-{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call down_write().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_register(&nh->head, n);
-
- down_write(&nh->rwsem);
- ret = notifier_chain_register(&nh->head, n);
- up_write(&nh->rwsem);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
-
-/**
- * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from a blocking notifier chain.
- * Must be called from process context.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
- struct notifier_block *n)
+int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call down_write().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_unregister(&nh->head, n);
-
- down_write(&nh->rwsem);
- ret = notifier_chain_unregister(&nh->head, n);
- up_write(&nh->rwsem);
- return ret;
+	write_lock(&notifier_lock);
+	while ((*nl) != NULL) {
+		if ((*nl) == n) {
+			*nl = n->next;
+			write_unlock(&notifier_lock);
+			return 0;
+		}
+		nl = &((*nl)->next);
+	}
+	write_unlock(&notifier_lock);
+	return -ENOENT;
}
-EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+EXPORT_SYMBOL(notifier_chain_unregister);
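/*
 * Illustrative sketch (editor's example, not part of this patch): a notifier
 * must be unregistered before the memory holding it goes away, typically in
 * a module's exit path. The names reuse the hypothetical registration sketch
 * above.
 */
static void example_teardown(void)
{
	notifier_chain_unregister(&example_chain, &example_nb);
}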
/**
- * blocking_notifier_call_chain - Call functions in a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
+ * notifier_call_chain - Call functions in a notifier chain
+ * @n: Pointer to root pointer of notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
- * Calls each function in a notifier chain in turn. The functions
- * run in a process context, so they are allowed to block.
+ * Calls each function in a notifier chain in turn.
*
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK, then notifier_call_chain
* will return immediately, with the return value of
* the notifier function which halted execution.
- * Otherwise the return value is the return value
+ * Otherwise, the return value is the return value
* of the last notifier function called.
*/
-int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v)
+int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
- int ret;
+	int ret = NOTIFY_DONE;
+	struct notifier_block *nb = *n;
- down_read(&nh->rwsem);
- ret = notifier_call_chain(&nh->head, val, v);
- up_read(&nh->rwsem);
+	while (nb) {
+		ret = nb->notifier_call(nb, val, v);
+		if (ret & NOTIFY_STOP_MASK)
+			return ret;
+		nb = nb->next;
+	}
return ret;
}
-EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
-
-/*
- * Raw notifier chain routines. There is no protection;
- * the caller must provide it. Use at your own risk!
- */
-
-/**
- * raw_notifier_chain_register - Add notifier to a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to a raw notifier chain.
- * All locking must be provided by the caller.
- *
- * Currently always returns zero.
- */
-
-int raw_notifier_chain_register(struct raw_notifier_head *nh,
- struct notifier_block *n)
-{
- return notifier_chain_register(&nh->head, n);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
-
-/**
- * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from a raw notifier chain.
- * All locking must be provided by the caller.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
- struct notifier_block *n)
-{
- return notifier_chain_unregister(&nh->head, n);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
-
-/**
- * raw_notifier_call_chain - Call functions in a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in an undefined context.
- * All locking must be provided by the caller.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-
-int raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v)
-{
- return notifier_call_chain(&nh->head, val, v);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
+EXPORT_SYMBOL(notifier_call_chain);
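/*
 * Illustrative sketch (editor's example, not part of this patch): a callback
 * halts traversal by returning a value with NOTIFY_STOP_MASK set, such as
 * NOTIFY_STOP or NOTIFY_BAD; otherwise the chain continues and the last
 * return value wins. The event code below is hypothetical.
 */
#define EXAMPLE_EVENT_FORBIDDEN	0x01	/* hypothetical event code */

static int example_veto(struct notifier_block *self,
			unsigned long event, void *data)
{
	if (event == EXAMPLE_EVENT_FORBIDDEN)
		return NOTIFY_BAD;	/* has NOTIFY_STOP_MASK set */
	return NOTIFY_OK;
}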
/**
* register_reboot_notifier - Register function to be called at reboot time
* @nb: Info about notifier function to be called
*
* Registers a function with the list of functions
* to be called at reboot time.
*
- * Currently always returns zero, as blocking_notifier_chain_register
+ * Currently always returns zero, as notifier_chain_register
* always returns zero.
*/
int register_reboot_notifier(struct notifier_block * nb)
{
- return blocking_notifier_chain_register(&reboot_notifier_list, nb);
+ return notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
int unregister_reboot_notifier(struct notifier_block * nb)
{
- return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
+ return notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
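/*
 * Illustrative sketch (editor's example, not part of this patch): a driver
 * that must quiesce hardware on shutdown registers a reboot notifier and
 * switches on the event it is handed. Names are hypothetical.
 */
static int example_reboot_event(struct notifier_block *self,
				unsigned long event, void *unused)
{
	switch (event) {
	case SYS_RESTART:
	case SYS_HALT:
	case SYS_POWER_OFF:
		/* stop hypothetical DMA engines, flush caches, etc. */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

static int __init example_reboot_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}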
+#ifndef CONFIG_SECURITY
+int capable(int cap)
+{
+	if (vx_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
+		return 0;
+	if (cap_raised(current->cap_effective, cap)) {
+		current->flags |= PF_SUPERPRIV;
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(capable);
+#endif
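/*
 * Illustrative sketch (editor's example, not part of this patch): capable()
 * is the usual permission gate in syscall paths; callers bail out with
 * -EPERM when the capability is not raised. The operation is hypothetical.
 */
static int example_privileged_op(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* ... privileged work ... */
	return 0;
}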
+
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
int no_nice;
void kernel_restart_prepare(char *cmd)
{
- blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
}
void kernel_shutdown_prepare(enum system_states state)
{
- blocking_notifier_call_chain(&reboot_notifier_list,
+ notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
system_state = state;
device_shutdown();
*/
if (tbuf) {
struct tms tmp;
- struct task_struct *tsk = current;
- struct task_struct *t;
cputime_t utime, stime, cutime, cstime;
- spin_lock_irq(&tsk->sighand->siglock);
- utime = tsk->signal->utime;
- stime = tsk->signal->stime;
- t = tsk;
- do {
- utime = cputime_add(utime, t->utime);
- stime = cputime_add(stime, t->stime);
- t = next_thread(t);
- } while (t != tsk);
+#ifdef CONFIG_SMP
+	if (thread_group_empty(current)) {
+		/*
+		 * Single thread case without the use of any locks.
+		 *
+		 * We may race with release_task if two threads are
+		 * executing. However, release_task first adds up the
+		 * counters (__exit_signal) before removing the task
+		 * from the process tasklist (__unhash_process).
+		 * __exit_signal also acquires and releases the
+		 * siglock which results in the proper memory ordering
+		 * so that the list modifications are always visible
+		 * after the counters have been updated.
+		 *
+		 * If the counters have been updated by the second thread
+		 * but the thread has not yet been removed from the list
+		 * then the other branch will be executing which will
+		 * block on tasklist_lock until the exit handling of the
+		 * other task is finished.
+		 *
+		 * This also implies that the sighand->siglock cannot
+		 * be held by another processor. So we can also
+		 * skip acquiring that lock.
+		 */
+		utime = cputime_add(current->signal->utime, current->utime);
+		stime = cputime_add(current->signal->stime, current->stime);
+		cutime = current->signal->cutime;
+		cstime = current->signal->cstime;
+	} else
+#endif
+	{
+		/* Process with multiple threads */
+		struct task_struct *tsk = current;
+		struct task_struct *t;
- cutime = tsk->signal->cutime;
- cstime = tsk->signal->cstime;
- spin_unlock_irq(&tsk->sighand->siglock);
+		read_lock(&tasklist_lock);
+		utime = tsk->signal->utime;
+		stime = tsk->signal->stime;
+		t = tsk;
+		do {
+			utime = cputime_add(utime, t->utime);
+			stime = cputime_add(stime, t->stime);
+			t = next_thread(t);
+		} while (t != tsk);
+		/*
+		 * While we have tasklist_lock read-locked, no dying thread
+		 * can be updating current->signal->[us]time. Instead,
+		 * we got their counts included in the live thread loop.
+		 * However, another thread can come in right now and
+		 * do a wait call that updates current->signal->c[us]time.
+		 * To make sure we always see that pair updated atomically,
+		 * we take the siglock around fetching them.
+		 */
+		spin_lock_irq(&tsk->sighand->siglock);
+		cutime = tsk->signal->cutime;
+		cstime = tsk->signal->cstime;
+		spin_unlock_irq(&tsk->sighand->siglock);
+		read_unlock(&tasklist_lock);
+	}
tmp.tms_utime = cputime_to_clock_t(utime);
tmp.tms_stime = cputime_to_clock_t(stime);
tmp.tms_cutime = cputime_to_clock_t(cutime);
asmlinkage long sys_setsid(void)
{
struct task_struct *group_leader = current->group_leader;
- pid_t session;
+ struct pid *pid;
int err = -EPERM;
- mutex_lock(&tty_mutex);
+ down(&tty_sem);
write_lock_irq(&tasklist_lock);
- /* Fail if I am already a session leader */
- if (group_leader->signal->leader)
- goto out;
-
- session = group_leader->pid;
- /* Fail if a process group id already exists that equals the
- * proposed session id.
- *
- * Don't check if session id == 1 because kernel threads use this
- * session id and so the check will always fail and make it so
- * init cannot successfully call setsid.
- */
- if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
+ pid = find_pid(PIDTYPE_PGID, group_leader->pid);
+ if (pid)
goto out;
group_leader->signal->leader = 1;
- __set_special_pids(session, session);
+ __set_special_pids(group_leader->pid, group_leader->pid);
group_leader->signal->tty = NULL;
group_leader->signal->tty_old_pgrp = 0;
err = process_group(group_leader);
out:
write_unlock_irq(&tasklist_lock);
- mutex_unlock(&tty_mutex);
+ up(&tty_sem);
return err;
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
- unsigned int left, right;
+ int left, right;
if (!group_info)
return 0;
left = 0;
right = group_info->ngroups;
while (left < right) {
- unsigned int mid = (left+right)/2;
+ int mid = (left+right)/2;
int cmp = grp - GROUP_AT(group_info, mid);
if (cmp > 0)
left = mid + 1;
return -EINVAL;
/* no need to grab task_lock here; it cannot change */
+ get_group_info(current->group_info);
i = current->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
}
}
out:
+ put_group_info(current->group_info);
return i;
}
{
int retval = 1;
if (grp != current->fsgid) {
+ get_group_info(current->group_info);
retval = groups_search(current->group_info, grp);
+ put_group_info(current->group_info);
}
return retval;
}
{
int retval = 1;
if (grp != current->egid) {
+ get_group_info(current->group_info);
retval = groups_search(current->group_info, grp);
+ put_group_info(current->group_info);
}
return retval;
}
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit new_rlim, *old_rlim;
- unsigned long it_prof_secs;
int retval;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
- if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
- if (new_rlim.rlim_cur > new_rlim.rlim_max)
- return -EINVAL;
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
return -EPERM;
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
- return -EPERM;
+		return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
*old_rlim = new_rlim;
task_unlock(current->group_leader);
- if (resource != RLIMIT_CPU)
- goto out;
-
- /*
- * RLIMIT_CPU handling. Note that the kernel fails to return an error
- * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
- * very long-standing error, and fixing it now risks breakage of
- * applications, so we live with it
- */
- if (new_rlim.rlim_cur == RLIM_INFINITY)
- goto out;
-
- it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
- if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
+	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
+	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+	     new_rlim.rlim_cur <= cputime_to_secs(
+		     current->signal->it_prof_expires))) {
unsigned long rlim_cur = new_rlim.rlim_cur;
cputime_t cputime;
cputime = secs_to_cputime(rlim_cur);
read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
- set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+		set_process_cpu_timer(current, CPUCLOCK_PROF,
+				      &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
read_unlock(&tasklist_lock);
}
-out:
+
return 0;
}
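/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * RLIMIT_CPU path above is exercised from userspace roughly as follows;
 * note that rlim_cur must not exceed rlim_max or the syscall returns
 * -EINVAL.
 */
#include <sys/resource.h>

int example_limit_cpu(void)
{
	/* allow 60s of CPU time, hard cap at 120s */
	struct rlimit rl = { .rlim_cur = 60, .rlim_max = 120 };

	return setrlimit(RLIMIT_CPU, &rl);
}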
* a lot simpler! (Which we're not doing right now because we're not
* measuring them yet).
*
+ * This expects to be called with tasklist_lock read-locked or better,
+ * and the siglock not locked. It may momentarily take the siglock.
+ *
* When sampling multiple threads for RUSAGE_SELF, under SMP we might have
* races with threads incrementing their own counters. But since word
* reads are atomic, we either get new values or old values and we don't
* the c* fields from p->signal from races with exit.c updating those
* fields when reaping, so a sample either gets all the additions of a
* given child after it's reaped, or none so this sample is before reaping.
- *
- * tasklist_lock locking optimisation:
- * If we are current and single threaded, we do not need to take the tasklist
- * lock or the siglock. No one else can take our signal_struct away,
- * no one else can reap the children to update signal->c* counters, and
- * no one else can race with the signal-> fields.
- * If we do not take the tasklist_lock, the signal-> fields could be read
- * out of order while another thread was just exiting. So we place a
- * read memory barrier when we avoid the lock. On the writer side,
- * write memory barrier is implied in __exit_signal as __exit_signal releases
- * the siglock spinlock after updating the signal-> fields.
- *
- * We don't really need the siglock when we access the non c* fields
- * of the signal_struct (for RUSAGE_SELF) even in multithreaded
- * case, since we take the tasklist lock for read and the non c* signal->
- * fields are updated only in __exit_signal, which is called with
- * tasklist_lock taken for write, hence these two threads cannot execute
- * concurrently.
- *
*/
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
cputime_t utime, stime;
- int need_lock = 0;
memset((char *) r, 0, sizeof *r);
- utime = stime = cputime_zero;
- if (p != current || !thread_group_empty(p))
- need_lock = 1;
+	if (unlikely(!p->signal))
+		return;
- if (need_lock) {
- read_lock(&tasklist_lock);
- if (unlikely(!p->signal)) {
- read_unlock(&tasklist_lock);
- return;
- }
- } else
- /* See locking comments above */
- smp_rmb();
+ utime = stime = cputime_zero;
switch (who) {
case RUSAGE_BOTH:
BUG();
}
- if (need_lock)
- read_unlock(&tasklist_lock);
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
struct rusage r;
+ read_lock(&tasklist_lock);
k_getrusage(p, who, &r);
+ read_unlock(&tasklist_lock);
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
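/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * syscall above is reached from userspace via getrusage(2).
 */
#include <sys/resource.h>
#include <stdio.h>

void example_report_times(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("user %ld.%06ld s\n",
		       (long)ru.ru_utime.tv_sec,
		       (long)ru.ru_utime.tv_usec);
}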