* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#include <linux/config.h>
-#include <linux/compat.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
-#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
-#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <linux/workqueue.h>
+#include <linux/capability.h>
#include <linux/device.h>
+#include <linux/key.h>
#include <linux/times.h>
+#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
-#include <linux/vs_base.h>
-#include <linux/vs_cvirt.h>
+#include <linux/tty.h>
+#include <linux/signal.h>
+#include <linux/cn_proc.h>
+#include <linux/getcpu.h>
+
+#include <linux/compat.h>
+#include <linux/syscalls.h>
+#include <linux/kprobes.h>
+#include <linux/vs_pid.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b) (-EINVAL)
#endif
+#ifndef GET_ENDIAN
+# define GET_ENDIAN(a,b) (-EINVAL)
+#endif
+#ifndef SET_ENDIAN
+# define SET_ENDIAN(a,b) (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
*/
int C_A_D = 1;
-int cad_pid = 1;
+struct pid *cad_pid;
+EXPORT_SYMBOL(cad_pid);
/*
* Notifier list for kernel code which wants to be called
* and the like.
*/
-static struct notifier_block *reboot_notifier_list;
-rwlock_t notifier_lock = RW_LOCK_UNLOCKED;
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines. The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if (n->priority > (*nl)->priority)
+ break;
+ nl = &((*nl)->next);
+ }
+ n->next = *nl;
+ rcu_assign_pointer(*nl, n);
+ return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if ((*nl) == n) {
+ rcu_assign_pointer(*nl, n->next);
+ return 0;
+ }
+ nl = &((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+ unsigned long val, void *v)
+{
+ int ret = NOTIFY_DONE;
+ struct notifier_block *nb, *next_nb;
+
+ nb = rcu_dereference(*nl);
+ while (nb) {
+ next_nb = rcu_dereference(nb->next);
+ ret = nb->notifier_call(nb, val, v);
+ if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ break;
+ nb = next_nb;
+ }
+ return ret;
+}
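
The core walker above stops as soon as a callee returns a value with NOTIFY_STOP_MASK set, and registration keeps the list sorted by descending priority. A minimal sketch of a chain entry (the handler and block names are illustrative, not part of this patch):

	#include <linux/notifier.h>

	/* Hypothetical handler invoked for each event posted on a chain. */
	static int my_event_handler(struct notifier_block *nb,
				    unsigned long val, void *data)
	{
		/* Return NOTIFY_STOP to halt traversal, NOTIFY_DONE to continue. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_nb = {
		.notifier_call	= my_event_handler,
		.priority	= 0,		/* higher runs earlier */
	};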
+
+/*
+ * Atomic notifier chain routines. Registration and unregistration
+ * use a spinlock, and call_chain is synchronized by RCU (no locks).
+ */
/**
- * notifier_chain_register - Add notifier to a notifier chain
- * @list: Pointer to root list pointer
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
* @n: New entry in notifier chain
*
- * Adds a notifier to a notifier chain.
+ * Adds a notifier to an atomic notifier chain.
*
* Currently always returns zero.
*/
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_unregister(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ synchronize_rcu();
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ * atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an atomic context, so they must not block.
+ * This routine uses RCU to synchronize with changes to the chain.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
-int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v)
{
- write_lock(&notifier_lock);
- while(*list)
- {
- if(n->priority > (*list)->priority)
- break;
- list= &((*list)->next);
- }
- n->next = *list;
- *list=n;
- write_unlock(&notifier_lock);
- return 0;
+ int ret;
+
+ rcu_read_lock();
+ ret = notifier_call_chain(&nh->head, val, v);
+ rcu_read_unlock();
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_register);
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
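
A usage sketch for the atomic flavor, reusing the hypothetical my_nb above; ATOMIC_NOTIFIER_HEAD declares a statically initialized head:

	static ATOMIC_NOTIFIER_HEAD(my_atomic_chain);

	static void my_atomic_demo(void)
	{
		atomic_notifier_chain_register(&my_atomic_chain, &my_nb);
		/* Safe from atomic context; callees must not block. */
		atomic_notifier_call_chain(&my_atomic_chain, 0, NULL);
		atomic_notifier_chain_unregister(&my_atomic_chain, &my_nb);
	}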
+
+/*
+ * Blocking notifier chain routines. All access to the chain is
+ * synchronized by an rwsem.
+ */
/**
- * notifier_chain_unregister - Remove notifier from a notifier chain
- * @nl: Pointer to root list pointer
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @n: New entry in notifier chain
*
- * Removes a notifier from a notifier chain.
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
*
- * Returns zero on success, or %-ENOENT on failure.
+ * Currently always returns zero.
*/
-int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
{
- write_lock(&notifier_lock);
- while((*nl)!=NULL)
- {
- if((*nl)==n)
- {
- *nl=n->next;
- write_unlock(&notifier_lock);
- return 0;
- }
- nl=&((*nl)->next);
- }
- write_unlock(&notifier_lock);
- return -ENOENT;
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_register(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
}
-EXPORT_SYMBOL(notifier_chain_unregister);
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
- * notifier_call_chain - Call functions in a notifier chain
- * @n: Pointer to root pointer of notifier chain
+ * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a blocking notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_unregister(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+
+/**
+ * blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function
*
- * Calls each function in a notifier chain in turn.
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
*
- * If the return value of the notifier can be and'd
- * with %NOTIFY_STOP_MASK, then notifier_call_chain
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
* will return immediately, with the return value of
* the notifier function which halted execution.
- * Otherwise, the return value is the return value
+ * Otherwise the return value is the return value
* of the last notifier function called.
*/
-int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v)
{
- int ret=NOTIFY_DONE;
- struct notifier_block *nb = *n;
+ int ret = NOTIFY_DONE;
- while(nb)
- {
- ret=nb->notifier_call(nb,val,v);
- if(ret&NOTIFY_STOP_MASK)
- {
- return ret;
- }
- nb=nb->next;
+ /*
+ * We check the head outside the lock, but if this access is
+ * racy then it does not matter what the result of the test
+ * is, we re-check the list after having taken the lock anyway:
+ */
+ if (rcu_dereference(nh->head)) {
+ down_read(&nh->rwsem);
+ ret = notifier_call_chain(&nh->head, val, v);
+ up_read(&nh->rwsem);
}
return ret;
}
-EXPORT_SYMBOL(notifier_call_chain);
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+
+/*
+ * Raw notifier chain routines. There is no protection;
+ * the caller must provide it. Use at your own risk!
+ */
+
+/**
+ * raw_notifier_chain_register - Add notifier to a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Currently always returns zero.
+ */
+
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_register(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_unregister(&nh->head, n);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ * raw_notifier_call_chain - Call functions in a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an undefined context.
+ * All locking must be provided by the caller.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return notifier_call_chain(&nh->head, val, v);
+}
+
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
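
With the raw flavor the caller owns all synchronization; a sketch guarding the hypothetical chain with an external mutex:

	#include <linux/mutex.h>

	static RAW_NOTIFIER_HEAD(my_raw_chain);
	static DEFINE_MUTEX(my_raw_lock);

	static void my_raw_demo(void)
	{
		mutex_lock(&my_raw_lock);
		raw_notifier_chain_register(&my_raw_chain, &my_nb);
		raw_notifier_call_chain(&my_raw_chain, 0, NULL);
		raw_notifier_chain_unregister(&my_raw_chain, &my_nb);
		mutex_unlock(&my_raw_lock);
	}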
+
+/*
+ * SRCU notifier chain routines. Registration and unregistration
+ * use a mutex, and call_chain is synchronized by SRCU (no locks).
+ */
+
+/**
+ * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to an SRCU notifier chain.
+ * Must be called in process context.
+ *
+ * Currently always returns zero.
+ */
+
+int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_register(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
+
+/**
+ * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an SRCU notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_unregister(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ synchronize_srcu(&nh->srcu);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
+
+/**
+ * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+
+int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ int ret;
+ int idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+ ret = notifier_call_chain(&nh->head, val, v);
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
+
+/**
+ * srcu_init_notifier_head - Initialize an SRCU notifier head
+ * @nh: Pointer to head of the srcu notifier chain
+ *
+ * Unlike other sorts of notifier heads, SRCU notifier heads require
+ * dynamic initialization. Be sure to call this routine before
+ * calling any of the other SRCU notifier routines for this head.
+ *
+ * If an SRCU notifier head is deallocated, it must first be cleaned
+ * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
+ * per-cpu data (used by the SRCU mechanism) will leak.
+ */
+
+void srcu_init_notifier_head(struct srcu_notifier_head *nh)
+{
+ mutex_init(&nh->mutex);
+ if (init_srcu_struct(&nh->srcu) < 0)
+ BUG();
+ nh->head = NULL;
+}
+
+EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
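
Unlike the other head types there is no static initializer for SRCU heads, so setup order matters; a sketch:

	#include <linux/init.h>

	static struct srcu_notifier_head my_srcu_chain;

	static int __init my_srcu_setup(void)
	{
		srcu_init_notifier_head(&my_srcu_chain);	/* must come first */
		srcu_notifier_chain_register(&my_srcu_chain, &my_nb);
		srcu_notifier_call_chain(&my_srcu_chain, 0, NULL);
		return 0;
	}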
/**
* register_reboot_notifier - Register function to be called at reboot time
* Registers a function with the list of functions
* to be called at reboot time.
*
- * Currently always returns zero, as notifier_chain_register
+ * Currently always returns zero, as blocking_notifier_chain_register
* always returns zero.
*/
int register_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_register(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
int unregister_reboot_notifier(struct notifier_block * nb)
{
- return notifier_chain_unregister(&reboot_notifier_list, nb);
+ return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
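
A sketch of a client of the (now blocking) reboot chain; the handler name is illustrative:

	static int my_reboot_event(struct notifier_block *nb,
				   unsigned long code, void *cmd)
	{
		/* code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; cmd is
		 * the restart command string or NULL. */
		printk(KERN_INFO "shutting down, code %lu\n", code);
		return NOTIFY_DONE;
	}

	static struct notifier_block my_reboot_nb = {
		.notifier_call = my_reboot_event,
	};

	/* register_reboot_notifier(&my_reboot_nb) from process context;
	 * pair with unregister_reboot_notifier() on teardown. */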
-asmlinkage long sys_ni_syscall(void)
-{
- return -ENOSYS;
-}
-
-cond_syscall(sys_nfsservctl)
-cond_syscall(sys_quotactl)
-cond_syscall(sys_acct)
-cond_syscall(sys_lookup_dcookie)
-cond_syscall(sys_swapon)
-cond_syscall(sys_swapoff)
-cond_syscall(sys_init_module)
-cond_syscall(sys_delete_module)
-cond_syscall(sys_socketpair)
-cond_syscall(sys_bind)
-cond_syscall(sys_listen)
-cond_syscall(sys_accept)
-cond_syscall(sys_connect)
-cond_syscall(sys_getsockname)
-cond_syscall(sys_getpeername)
-cond_syscall(sys_sendto)
-cond_syscall(sys_send)
-cond_syscall(sys_recvfrom)
-cond_syscall(sys_recv)
-cond_syscall(sys_socket)
-cond_syscall(sys_setsockopt)
-cond_syscall(sys_getsockopt)
-cond_syscall(sys_shutdown)
-cond_syscall(sys_sendmsg)
-cond_syscall(sys_recvmsg)
-cond_syscall(sys_socketcall)
-cond_syscall(sys_futex)
-cond_syscall(compat_sys_futex)
-cond_syscall(sys_epoll_create)
-cond_syscall(sys_epoll_ctl)
-cond_syscall(sys_epoll_wait)
-cond_syscall(sys_semget)
-cond_syscall(sys_semop)
-cond_syscall(sys_semtimedop)
-cond_syscall(sys_semctl)
-cond_syscall(sys_msgget)
-cond_syscall(sys_msgsnd)
-cond_syscall(sys_msgrcv)
-cond_syscall(sys_msgctl)
-cond_syscall(sys_shmget)
-cond_syscall(sys_shmdt)
-cond_syscall(sys_shmctl)
-cond_syscall(sys_mq_open)
-cond_syscall(sys_mq_unlink)
-cond_syscall(sys_mq_timedsend)
-cond_syscall(sys_mq_timedreceive)
-cond_syscall(sys_mq_notify)
-cond_syscall(sys_mq_getsetattr)
-cond_syscall(compat_sys_mq_open)
-cond_syscall(compat_sys_mq_timedsend)
-cond_syscall(compat_sys_mq_timedreceive)
-cond_syscall(compat_sys_mq_notify)
-cond_syscall(compat_sys_mq_getsetattr)
-cond_syscall(sys_mbind)
-cond_syscall(sys_get_mempolicy)
-cond_syscall(sys_set_mempolicy)
-cond_syscall(compat_mbind)
-cond_syscall(compat_get_mempolicy)
-cond_syscall(compat_set_mempolicy)
-
-/* arch-specific weak syscall entries */
-cond_syscall(sys_pciconfig_read)
-cond_syscall(sys_pciconfig_write)
-cond_syscall(sys_pciconfig_iobase)
-
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
int no_nice;
if (p->uid != current->euid &&
- p->uid != current->uid && !capable(CAP_SYS_NICE)) {
+ p->euid != current->euid && !capable(CAP_SYS_NICE)) {
error = -EPERM;
goto out;
}
- if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
+ if (niceval < task_nice(p) && !can_nice(p, niceval)) {
if (vx_flags(VXF_IGNEG_NICE, 0))
error = 0;
else
if (!who)
who = process_group(current);
do_each_task_pid(who, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
error = set_one_prio(p, niceval, error);
} while_each_task_pid(who, PIDTYPE_PGID, p);
break;
case PRIO_USER:
+ user = current->user;
if (!who)
- user = current->user;
+ who = current->uid;
else
- user = find_user(vx_current_xid(), who);
-
- if (!user)
- goto out_unlock;
+ if ((who != current->uid) &&
+ !(user = find_user(vx_current_xid(), who)))
+ goto out_unlock; /* No processes for this user */
do_each_thread(g, p)
if (p->uid == who)
error = set_one_prio(p, niceval, error);
while_each_thread(g, p);
- if (who)
+ if (who != current->uid)
free_uid(user); /* For find_user() */
break;
}
if (!who)
who = process_group(current);
do_each_task_pid(who, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
} while_each_task_pid(who, PIDTYPE_PGID, p);
break;
case PRIO_USER:
+ user = current->user;
if (!who)
- user = current->user;
+ who = current->uid;
else
- user = find_user(vx_current_xid(), who);
-
- if (!user)
- goto out_unlock;
+ if ((who != current->uid) &&
+ !(user = find_user(vx_current_xid(), who)))
+ goto out_unlock; /* No processes for this user */
do_each_thread(g, p)
if (p->uid == who) {
retval = niceval;
}
while_each_thread(g, p);
- if (who)
+ if (who != current->uid)
free_uid(user); /* for find_user() */
break;
}
return retval;
}
-long vs_reboot(unsigned int, void *);
+/**
+ * emergency_restart - reboot the system
+ *
+ * Without shutting down any hardware or taking any locks,
+ * reboot the system. This is called when we know we are in
+ * trouble so this is our best effort to reboot. This is
+ * safe to call in interrupt context.
+ */
+void emergency_restart(void)
+{
+ machine_emergency_restart();
+}
+EXPORT_SYMBOL_GPL(emergency_restart);
+
+static void kernel_restart_prepare(char *cmd)
+{
+ blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
+ system_state = SYSTEM_RESTART;
+ device_shutdown();
+}
+
+/**
+ * kernel_restart - reboot the system
+ * @cmd: pointer to buffer containing command to execute for restart
+ * or %NULL
+ *
+ * Shutdown everything and perform a clean reboot.
+ * This is not safe to call in interrupt context.
+ */
+void kernel_restart(char *cmd)
+{
+ kernel_restart_prepare(cmd);
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+ printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+ machine_restart(cmd);
+}
+EXPORT_SYMBOL_GPL(kernel_restart);
+
+/**
+ * kernel_kexec - reboot the system
+ *
+ * Move into place and start executing a preloaded standalone
+ * executable. If nothing was preloaded return an error.
+ */
+static void kernel_kexec(void)
+{
+#ifdef CONFIG_KEXEC
+ struct kimage *image;
+ image = xchg(&kexec_image, NULL);
+ if (!image)
+ return;
+ kernel_restart_prepare(NULL);
+ printk(KERN_EMERG "Starting new kernel\n");
+ machine_shutdown();
+ machine_kexec(image);
+#endif
+}
+
+void kernel_shutdown_prepare(enum system_states state)
+{
+ blocking_notifier_call_chain(&reboot_notifier_list,
+ (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
+ system_state = state;
+ device_shutdown();
+}
+/**
+ * kernel_halt - halt the system
+ *
+ * Shutdown everything and perform a clean system halt.
+ */
+void kernel_halt(void)
+{
+ kernel_shutdown_prepare(SYSTEM_HALT);
+ printk(KERN_EMERG "System halted.\n");
+ machine_halt();
+}
+
+EXPORT_SYMBOL_GPL(kernel_halt);
+
+/**
+ * kernel_power_off - power_off the system
+ *
+ * Shutdown everything and perform a clean system power_off.
+ */
+void kernel_power_off(void)
+{
+ kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+ printk(KERN_EMERG "Power down.\n");
+ machine_power_off();
+}
+EXPORT_SYMBOL_GPL(kernel_power_off);
+
+long vs_reboot(unsigned int, void __user *);
/*
* Reboot system call: for obvious reasons only root may call it,
magic2 != LINUX_REBOOT_MAGIC2C))
return -EINVAL;
- if (!vx_check(0, VX_ADMIN|VX_WATCH))
+ /* Instead of trying to make the power_off code look like
+ * halt when pm_power_off is not set do it the easy way.
+ */
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ cmd = LINUX_REBOOT_CMD_HALT;
+
+ if (!vx_check(0, VS_ADMIN|VS_WATCH))
return vs_reboot(cmd, arg);
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- system_state = SYSTEM_RESTART;
- device_shutdown();
- printk(KERN_EMERG "Restarting system.\n");
- machine_restart(NULL);
+ kernel_restart(NULL);
break;
case LINUX_REBOOT_CMD_CAD_ON:
break;
case LINUX_REBOOT_CMD_HALT:
- notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
- system_state = SYSTEM_HALT;
- device_shutdown();
- printk(KERN_EMERG "System halted.\n");
- machine_halt();
+ kernel_halt();
unlock_kernel();
do_exit(0);
break;
case LINUX_REBOOT_CMD_POWER_OFF:
- notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
- system_state = SYSTEM_POWER_OFF;
- device_shutdown();
- printk(KERN_EMERG "Power down.\n");
- machine_power_off();
+ kernel_power_off();
unlock_kernel();
do_exit(0);
break;
}
buffer[sizeof(buffer) - 1] = '\0';
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
- system_state = SYSTEM_RESTART;
- device_shutdown();
- printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
- machine_restart(buffer);
+ kernel_restart(buffer);
break;
+ case LINUX_REBOOT_CMD_KEXEC:
+ kernel_kexec();
+ unlock_kernel();
+ return -EINVAL;
+
#ifdef CONFIG_SOFTWARE_SUSPEND
case LINUX_REBOOT_CMD_SW_SUSPEND:
{
return 0;
}
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
{
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- machine_restart(NULL);
+ kernel_restart(NULL);
}
/*
*/
void ctrl_alt_del(void)
{
- static DECLARE_WORK(cad_work, deferred_cad, NULL);
+ static DECLARE_WORK(cad_work, deferred_cad);
if (C_A_D)
schedule_work(&cad_work);
else
- kill_proc(cad_pid, SIGINT, 1);
+ kill_cad_pid(SIGINT, 1);
}
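
The deferred_cad() change tracks the reworked workqueue API: handlers now receive the work item itself rather than a void pointer, and DECLARE_WORK takes two arguments. A sketch under those assumptions:

	#include <linux/workqueue.h>

	static void my_worker(struct work_struct *work)
	{
		/* Use container_of(work, ...) to recover the embedding
		 * object when the work item is not a static singleton. */
	}

	static DECLARE_WORK(my_work, my_worker);
	/* schedule_work(&my_work) runs my_worker in keventd context. */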
-
/*
* Unprivileged users may change the real gid to the effective gid
* or vice versa. (BSD-style)
(current->sgid == egid) ||
capable(CAP_SETGID))
new_egid = egid;
- else {
+ else
return -EPERM;
- }
}
- if (new_egid != old_egid)
- {
- current->mm->dumpable = 0;
- wmb();
+ if (new_egid != old_egid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
current->fsgid = new_egid;
current->egid = new_egid;
current->gid = new_rgid;
+ key_fsgid_changed(current);
+ proc_id_connector(current, PROC_EVENT_GID);
return 0;
}
if (retval)
return retval;
- if (capable(CAP_SETGID))
- {
- if(old_egid != gid)
- {
- current->mm->dumpable=0;
- wmb();
+ if (capable(CAP_SETGID)) {
+ if (old_egid != gid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
- }
- else if ((gid == current->gid) || (gid == current->sgid))
- {
- if(old_egid != gid)
- {
- current->mm->dumpable=0;
- wmb();
+ } else if ((gid == current->gid) || (gid == current->sgid)) {
+ if (old_egid != gid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->egid = current->fsgid = gid;
}
else
return -EPERM;
+
+ key_fsgid_changed(current);
+ proc_id_connector(current, PROC_EVENT_GID);
return 0;
}
return -EAGAIN;
if (atomic_read(&new_user->processes) >=
- current->rlim[RLIMIT_NPROC].rlim_cur &&
+ current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != &root_user) {
free_uid(new_user);
return -EAGAIN;
switch_uid(new_user);
- if(dumpclear)
- {
- current->mm->dumpable = 0;
- wmb();
+ if (dumpclear) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->uid = new_ruid;
return 0;
if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
return -EAGAIN;
- if (new_euid != old_euid)
- {
- current->mm->dumpable=0;
- wmb();
+ if (new_euid != old_euid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
current->suid = current->euid;
current->fsuid = current->euid;
+ key_fsuid_changed(current);
+ proc_id_connector(current, PROC_EVENT_UID);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
asmlinkage long sys_setuid(uid_t uid)
{
int old_euid = current->euid;
- int old_ruid, old_suid, new_ruid, new_suid;
+ int old_ruid, old_suid, new_suid;
int retval;
retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
if (retval)
return retval;
- old_ruid = new_ruid = current->uid;
+ old_ruid = current->uid;
old_suid = current->suid;
new_suid = old_suid;
} else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
- if (old_euid != uid)
- {
- current->mm->dumpable = 0;
- wmb();
+ if (old_euid != uid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->fsuid = current->euid = uid;
current->suid = new_suid;
+ key_fsuid_changed(current);
+ proc_id_connector(current, PROC_EVENT_UID);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
return -EAGAIN;
}
if (euid != (uid_t) -1) {
- if (euid != current->euid)
- {
- current->mm->dumpable = 0;
- wmb();
+ if (euid != current->euid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->euid = euid;
}
if (suid != (uid_t) -1)
current->suid = suid;
+ key_fsuid_changed(current);
+ proc_id_connector(current, PROC_EVENT_UID);
+
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
return -EPERM;
}
if (egid != (gid_t) -1) {
- if (egid != current->egid)
- {
- current->mm->dumpable = 0;
- wmb();
+ if (egid != current->egid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->egid = egid;
}
current->gid = rgid;
if (sgid != (gid_t) -1)
current->sgid = sgid;
+
+ key_fsgid_changed(current);
+ proc_id_connector(current, PROC_EVENT_GID);
return 0;
}
if (uid == current->uid || uid == current->euid ||
uid == current->suid || uid == current->fsuid ||
- capable(CAP_SETUID))
- {
- if (uid != old_fsuid)
- {
- current->mm->dumpable = 0;
- wmb();
+ capable(CAP_SETUID)) {
+ if (uid != old_fsuid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->fsuid = uid;
}
+ key_fsuid_changed(current);
+ proc_id_connector(current, PROC_EVENT_UID);
+
security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
return old_fsuid;
if (gid == current->gid || gid == current->egid ||
gid == current->sgid || gid == current->fsgid ||
- capable(CAP_SETGID))
- {
- if (gid != old_fsgid)
- {
- current->mm->dumpable = 0;
- wmb();
+ capable(CAP_SETGID)) {
+ if (gid != old_fsgid) {
+ current->mm->dumpable = suid_dumpable;
+ smp_wmb();
}
current->fsgid = gid;
+ key_fsgid_changed(current);
+ proc_id_connector(current, PROC_EVENT_GID);
}
return old_fsgid;
}
struct tms tmp;
struct task_struct *tsk = current;
struct task_struct *t;
- unsigned long utime, stime, cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
- read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
utime = tsk->signal->utime;
stime = tsk->signal->stime;
t = tsk;
do {
- utime += t->utime;
- stime += t->stime;
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
t = next_thread(t);
} while (t != tsk);
- /*
- * While we have tasklist_lock read-locked, no dying thread
- * can be updating current->signal->[us]time. Instead,
- * we got their counts included in the live thread loop.
- * However, another thread can come in right now and
- * do a wait call that updates current->signal->c[us]time.
- * To make sure we always see that pair updated atomically,
- * we take the siglock around fetching them.
- */
- spin_lock_irq(&tsk->sighand->siglock);
cutime = tsk->signal->cutime;
cstime = tsk->signal->cstime;
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
- tmp.tms_utime = jiffies_to_clock_t(utime);
- tmp.tms_stime = jiffies_to_clock_t(stime);
- tmp.tms_cutime = jiffies_to_clock_t(cutime);
- tmp.tms_cstime = jiffies_to_clock_t(cstime);
+ tmp.tms_utime = cputime_to_clock_t(utime);
+ tmp.tms_stime = cputime_to_clock_t(stime);
+ tmp.tms_cutime = cputime_to_clock_t(cutime);
+ tmp.tms_cstime = cputime_to_clock_t(cstime);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
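
On the userspace side these accumulated values surface through times(2) in clock ticks; a minimal sketch:

	#include <stdio.h>
	#include <sys/times.h>
	#include <unistd.h>

	int main(void)
	{
		struct tms t;
		long hz = sysconf(_SC_CLK_TCK);

		times(&t);
		printf("user %.2fs sys %.2fs\n",
		       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
		return 0;
	}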
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
- int err = -EINVAL;
+ struct task_struct *group_leader = current->group_leader;
pid_t rpgid;
+ int err = -EINVAL;
if (!pid)
- pid = vx_map_pid(current->pid);
+ pid = vx_map_pid(group_leader->pid);
if (!pgid)
pgid = pid;
if (pgid < 0)
if (!thread_group_leader(p))
goto out;
- if (p->parent == current || p->real_parent == current) {
+ if (p->parent == group_leader) {
err = -EPERM;
- if (p->signal->session != current->signal->session)
+ if (process_session(p) != process_session(group_leader))
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
- if (p != current)
+ if (p != group_leader)
goto out;
}
goto out;
if (pgid != pid) {
- struct task_struct *p;
+ struct task_struct *g =
+ find_task_by_pid_type(PIDTYPE_PGID, rpgid);
- do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
- if (p->signal->session == current->signal->session)
- goto ok_pgid;
- } while_each_task_pid(rpgid, PIDTYPE_PGID, p);
- goto out;
+ if (!g || process_session(g) != process_session(group_leader))
+ goto out;
}
-ok_pgid:
err = security_task_setpgid(p, rpgid);
if (err)
goto out;
asmlinkage long sys_getpgid(pid_t pid)
{
- if (!pid) {
+ if (!pid)
return vx_rmap_pid(process_group(current));
- } else {
+ else {
int retval;
struct task_struct *p;
asmlinkage long sys_getsid(pid_t pid)
{
- if (!pid) {
- return current->signal->session;
- } else {
+ if (!pid)
+ return process_session(current);
+ else {
int retval;
struct task_struct *p;
p = find_task_by_pid(pid);
retval = -ESRCH;
- if(p) {
+ if (p) {
retval = security_task_getsid(p);
if (!retval)
- retval = p->signal->session;
+ retval = process_session(p);
}
read_unlock(&tasklist_lock);
return retval;
asmlinkage long sys_setsid(void)
{
- struct pid *pid;
+ struct task_struct *group_leader = current->group_leader;
+ pid_t session;
int err = -EPERM;
- if (!thread_group_leader(current))
- return -EINVAL;
-
write_lock_irq(&tasklist_lock);
- pid = find_pid(PIDTYPE_PGID, current->pid);
- if (pid)
+ /* Fail if I am already a session leader */
+ if (group_leader->signal->leader)
+ goto out;
+
+ session = group_leader->pid;
+ /* Fail if a process group id already exists that equals the
+ * proposed session id.
+ *
+ * Don't check if session id == 1 because kernel threads use this
+ * session id and so the check will always fail and make it so
+ * init cannot successfully call setsid.
+ */
+ if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
goto out;
- current->signal->leader = 1;
- __set_special_pids(current->pid, current->pid);
- current->signal->tty = NULL;
- current->signal->tty_old_pgrp = 0;
- err = process_group(current);
+ group_leader->signal->leader = 1;
+ __set_special_pids(session, session);
+
+ spin_lock(&group_leader->sighand->siglock);
+ group_leader->signal->tty = NULL;
+ group_leader->signal->tty_old_pgrp = 0;
+ spin_unlock(&group_leader->sighand->siglock);
+
+ err = process_group(group_leader);
out:
write_unlock_irq(&tasklist_lock);
return err;
group_info->nblocks = nblocks;
atomic_set(&group_info->usage, 1);
- if (gidsetsize <= NGROUPS_SMALL) {
+ if (gidsetsize <= NGROUPS_SMALL)
group_info->blocks[0] = group_info->small_block;
- } else {
+ else {
for (i = 0; i < nblocks; i++) {
gid_t *b;
b = (void *)__get_free_page(GFP_USER);
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
- {
+{
int i;
int count = group_info->ngroups;
return 0;
}
-/* a simple shell-metzner sort */
+/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
int base, max, stride;
}
/* a simple bsearch */
-static int groups_search(struct group_info *group_info, gid_t grp)
+int groups_search(struct group_info *group_info, gid_t grp)
{
- int left, right;
+ unsigned int left, right;
if (!group_info)
return 0;
left = 0;
right = group_info->ngroups;
while (left < right) {
- int mid = (left+right)/2;
+ unsigned int mid = (left+right)/2;
int cmp = grp - GROUP_AT(group_info, mid);
if (cmp > 0)
left = mid + 1;
return -EINVAL;
/* no need to grab task_lock here; it cannot change */
- get_group_info(current->group_info);
i = current->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
}
}
out:
- put_group_info(current->group_info);
return i;
}
int in_group_p(gid_t grp)
{
int retval = 1;
- if (grp != current->fsgid) {
- get_group_info(current->group_info);
+ if (grp != current->fsgid)
retval = groups_search(current->group_info, grp);
- put_group_info(current->group_info);
- }
return retval;
}
int in_egroup_p(gid_t grp)
{
int retval = 1;
- if (grp != current->egid) {
- get_group_info(current->group_info);
+ if (grp != current->egid)
retval = groups_search(current->group_info, grp);
- put_group_info(current->group_info);
- }
return retval;
}
int errno = 0;
down_read(&uts_sem);
- if (copy_to_user(name, vx_new_utsname(), sizeof *name))
+ if (copy_to_user(name, utsname(), sizeof *name))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
int errno;
char tmp[__NEW_UTS_LEN];
- if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- char *ptr = vx_new_uts(nodename);
-
- memcpy(ptr, tmp, len);
- ptr[len] = 0;
+ memcpy(utsname()->nodename, tmp, len);
+ utsname()->nodename[len] = 0;
errno = 0;
}
up_write(&uts_sem);
asmlinkage long sys_gethostname(char __user *name, int len)
{
int i, errno;
- char *ptr;
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
- ptr = vx_new_uts(nodename);
- i = 1 + strlen(ptr);
+ i = 1 + strlen(utsname()->nodename);
if (i > len)
i = len;
errno = 0;
- if (copy_to_user(name, ptr, i))
+ if (copy_to_user(name, utsname()->nodename, i))
errno = -EFAULT;
up_read(&uts_sem);
return errno;
int errno;
char tmp[__NEW_UTS_LEN];
- if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- char *ptr = vx_new_uts(domainname);
-
- memcpy(ptr, tmp, len);
- ptr[len] = 0;
+ memcpy(utsname()->domainname, tmp, len);
+ utsname()->domainname[len] = 0;
errno = 0;
}
up_write(&uts_sem);
{
if (resource >= RLIM_NLIMITS)
return -EINVAL;
- else
- return copy_to_user(rlim, current->rlim + resource, sizeof(*rlim))
- ? -EFAULT : 0;
+ else {
+ struct rlimit value;
+ task_lock(current->group_leader);
+ value = current->signal->rlim[resource];
+ task_unlock(current->group_leader);
+ return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
+ }
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
if (resource >= RLIM_NLIMITS)
return -EINVAL;
- memcpy(&x, current->rlim + resource, sizeof(*rlim));
- if(x.rlim_cur > 0x7FFFFFFF)
+ task_lock(current->group_leader);
+ x = current->signal->rlim[resource];
+ task_unlock(current->group_leader);
+ if (x.rlim_cur > 0x7FFFFFFF)
x.rlim_cur = 0x7FFFFFFF;
- if(x.rlim_max > 0x7FFFFFFF)
+ if (x.rlim_max > 0x7FFFFFFF)
x.rlim_max = 0x7FFFFFFF;
return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
struct rlimit new_rlim, *old_rlim;
+ unsigned long it_prof_secs;
int retval;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
- if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+ if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return -EFAULT;
- if (new_rlim.rlim_cur > new_rlim.rlim_max)
- return -EINVAL;
- old_rlim = current->rlim + resource;
- if (((new_rlim.rlim_cur > old_rlim->rlim_max) ||
- (new_rlim.rlim_max > old_rlim->rlim_max)) &&
- !capable(CAP_SYS_RESOURCE) && !vx_ccaps(VXC_SET_RLIMIT))
+ if (new_rlim.rlim_cur > new_rlim.rlim_max)
+ return -EINVAL;
+ old_rlim = current->signal->rlim + resource;
+ if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
+ !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
+ return -EPERM;
+ if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
return -EPERM;
- if (resource == RLIMIT_NOFILE) {
- if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN)
- return -EPERM;
- }
retval = security_task_setrlimit(resource, &new_rlim);
if (retval)
return retval;
+ task_lock(current->group_leader);
*old_rlim = new_rlim;
+ task_unlock(current->group_leader);
+
+ if (resource != RLIMIT_CPU)
+ goto out;
+
+ /*
+ * RLIMIT_CPU handling. Note that the kernel fails to return an error
+ * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
+ * very long-standing error, and fixing it now risks breakage of
+ * applications, so we live with it.
+ */
+ if (new_rlim.rlim_cur == RLIM_INFINITY)
+ goto out;
+
+ it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
+ if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
+ unsigned long rlim_cur = new_rlim.rlim_cur;
+ cputime_t cputime;
+
+ if (rlim_cur == 0) {
+ /*
+ * The caller is asking for an immediate RLIMIT_CPU
+ * expiry. But we use the zero value to mean "it was
+ * never set". So let's cheat and make it one second
+ * instead
+ */
+ rlim_cur = 1;
+ }
+ cputime = secs_to_cputime(rlim_cur);
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&current->sighand->siglock);
+ set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+ spin_unlock_irq(&current->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ }
+out:
return 0;
}
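
Seen from userspace, the RLIMIT_CPU special-casing above means a zero soft limit quietly behaves like one second; a sketch of the caller's side:

	#include <sys/resource.h>

	/* Request a CPU-time limit; the kernel rounds a 0 soft limit up to 1s. */
	static int set_cpu_limit(rlim_t secs)
	{
		struct rlimit rl = { .rlim_cur = secs, .rlim_max = secs };

		return setrlimit(RLIMIT_CPU, &rl);	/* 0 on success */
	}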
* a lot simpler! (Which we're not doing right now because we're not
* measuring them yet).
*
- * This expects to be called with tasklist_lock read-locked or better,
- * and the siglock not locked. It may momentarily take the siglock.
- *
* When sampling multiple threads for RUSAGE_SELF, under SMP we might have
* races with threads incrementing their own counters. But since word
* reads are atomic, we either get new values or old values and we don't
* the c* fields from p->signal from races with exit.c updating those
* fields when reaping, so a sample either gets all the additions of a
* given child after it's reaped, or none so this sample is before reaping.
+ *
+ * Locking:
+ * We need to take the siglock for CHILDREN, SELF and BOTH
+ * for the cases of a current multithreaded task, a non-current
+ * single-threaded task and a non-current multithreaded task.
+ * Thread traversal is now safe with
+ * the siglock held.
+ * Strictly speaking, we do not need to take the siglock if we are current and
+ * single threaded, as no one else can take our signal_struct away, no one
+ * else can reap the children to update signal->c* counters, and no one else
+ * can race with the signal-> fields. If we do not take any lock, the
+ * signal-> fields could be read out of order while another thread was just
+ * exiting. So we should place a read memory barrier when we avoid the lock.
+ * On the writer side, a write memory barrier is implied in __exit_signal
+ * as __exit_signal releases the siglock spinlock after updating the signal->
+ * fields. But we don't do this yet to keep things simple.
+ *
*/
-void k_getrusage(struct task_struct *p, int who, struct rusage *r)
+static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
struct task_struct *t;
unsigned long flags;
- unsigned long utime, stime;
+ cputime_t utime, stime;
memset((char *) r, 0, sizeof *r);
+ utime = stime = cputime_zero;
- if (unlikely(!p->signal))
+ rcu_read_lock();
+ if (!lock_task_sighand(p, &flags)) {
+ rcu_read_unlock();
return;
+ }
switch (who) {
+ case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
- spin_lock_irqsave(&p->sighand->siglock, flags);
utime = p->signal->cutime;
stime = p->signal->cstime;
r->ru_nvcsw = p->signal->cnvcsw;
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- jiffies_to_timeval(utime, &r->ru_utime);
- jiffies_to_timeval(stime, &r->ru_stime);
- break;
+
+ if (who == RUSAGE_CHILDREN)
+ break;
+
case RUSAGE_SELF:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = stime = 0;
- goto sum_group;
- case RUSAGE_BOTH:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = p->signal->cutime;
- stime = p->signal->cstime;
- r->ru_nvcsw = p->signal->cnvcsw;
- r->ru_nivcsw = p->signal->cnivcsw;
- r->ru_minflt = p->signal->cmin_flt;
- r->ru_majflt = p->signal->cmaj_flt;
- sum_group:
- utime += p->signal->utime;
- stime += p->signal->stime;
+ utime = cputime_add(utime, p->signal->utime);
+ stime = cputime_add(stime, p->signal->stime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
t = p;
do {
- utime += t->utime;
- stime += t->stime;
+ utime = cputime_add(utime, t->utime);
+ stime = cputime_add(stime, t->stime);
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
r->ru_majflt += t->maj_flt;
t = next_thread(t);
} while (t != p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- jiffies_to_timeval(utime, &r->ru_utime);
- jiffies_to_timeval(stime, &r->ru_stime);
break;
+
default:
BUG();
}
+
+ unlock_task_sighand(p, &flags);
+ rcu_read_unlock();
+
+ cputime_to_timeval(utime, &r->ru_utime);
+ cputime_to_timeval(stime, &r->ru_stime);
}
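
k_getrusage() backs getrusage(2); a userspace sketch of the per-process totals it reports:

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rusage ru;

		getrusage(RUSAGE_SELF, &ru);	/* sums all threads, as above */
		printf("utime %ld.%06ld majflt %ld\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
		       ru.ru_majflt);
		return 0;
	}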
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
struct rusage r;
- read_lock(&tasklist_lock);
k_getrusage(p, who, &r);
- read_unlock(&tasklist_lock);
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- int error;
- int sig;
+ long error;
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
if (error)
switch (option) {
case PR_SET_PDEATHSIG:
- sig = arg2;
- if (sig < 0 || sig > _NSIG) {
+ if (!valid_signal(arg2)) {
error = -EINVAL;
break;
}
- current->pdeath_signal = sig;
+ current->pdeath_signal = arg2;
break;
case PR_GET_PDEATHSIG:
error = put_user(current->pdeath_signal, (int __user *)arg2);
break;
case PR_GET_DUMPABLE:
- if (current->mm->dumpable)
- error = 1;
+ error = current->mm->dumpable;
break;
case PR_SET_DUMPABLE:
- if (arg2 != 0 && arg2 != 1) {
+ if (arg2 < 0 || arg2 > 1) {
error = -EINVAL;
break;
}
set_task_comm(me, ncomm);
return 0;
}
+ case PR_GET_NAME: {
+ struct task_struct *me = current;
+ unsigned char tcomm[sizeof(me->comm)];
+
+ get_task_comm(tcomm, me);
+ if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
+ return -EFAULT;
+ return 0;
+ }
+ case PR_GET_ENDIAN:
+ error = GET_ENDIAN(current, arg2);
+ break;
+ case PR_SET_ENDIAN:
+ error = SET_ENDIAN(current, arg2);
+ break;
+
default:
error = -EINVAL;
break;
}
return error;
}
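
The new PR_GET_NAME pairs with the existing PR_SET_NAME; a userspace sketch (comm is at most 16 bytes including the NUL, and this assumes a libc exposing the constants):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
		char comm[16];

		prctl(PR_SET_NAME, "demo");
		prctl(PR_GET_NAME, comm);
		printf("task name: %s\n", comm);
		return 0;
	}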
+
+asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
+ struct getcpu_cache __user *cache)
+{
+ int err = 0;
+ int cpu = raw_smp_processor_id();
+ if (cpup)
+ err |= put_user(cpu, cpup);
+ if (nodep)
+ err |= put_user(cpu_to_node(cpu), nodep);
+ if (cache) {
+ /*
+ * The cache is not needed for this implementation,
+ * but make sure user programs pass something
+ * valid. vsyscall implementations can instead make
+ * good use of the cache. Only use t0 and t1 because
+ * these are available in both 32bit and 64bit ABI (no
+ * need for a compat_getcpu). 32bit has enough
+ * padding.
+ */
+ unsigned long t0, t1;
+ get_user(t0, &cache->blob[0]);
+ get_user(t1, &cache->blob[1]);
+ t0++;
+ t1++;
+ put_user(t0, &cache->blob[0]);
+ put_user(t1, &cache->blob[1]);
+ }
+ return err ? -EFAULT : 0;
+}
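
There is no libc wrapper for getcpu at this point, so a userspace sketch goes through syscall(2); __NR_getcpu is arch-specific:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		unsigned cpu, node;

		/* The cache argument may be NULL with this implementation. */
		if (syscall(__NR_getcpu, &cpu, &node, NULL) == 0)
			printf("running on cpu %u, node %u\n", cpu, node);
		return 0;
	}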