diff --git a/kernel/sys.c b/kernel/sys.c
index 6f2b0f170..801276862 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -5,7 +5,6 @@
  */
 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -18,13 +17,26 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 #include 
+#include 
 #include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
-#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
 
 #include 
 #include 
@@ -86,99 +98,304 @@ int cad_pid = 1;
  * and the like.
  */
 
-static struct notifier_block *reboot_notifier_list;
-rwlock_t notifier_lock = RW_LOCK_UNLOCKED;
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines. The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+		struct notifier_block *n)
+{
+	while ((*nl) != NULL) {
+		if (n->priority > (*nl)->priority)
+			break;
+		nl = &((*nl)->next);
+	}
+	n->next = *nl;
+	rcu_assign_pointer(*nl, n);
+	return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+		struct notifier_block *n)
+{
+	while ((*nl) != NULL) {
+		if ((*nl) == n) {
+			rcu_assign_pointer(*nl, n->next);
+			return 0;
+		}
+		nl = &((*nl)->next);
+	}
+	return -ENOENT;
+}
+
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+		unsigned long val, void *v)
+{
+	int ret = NOTIFY_DONE;
+	struct notifier_block *nb;
+
+	nb = rcu_dereference(*nl);
+	while (nb) {
+		ret = nb->notifier_call(nb, val, v);
+		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+			break;
+		nb = rcu_dereference(nb->next);
+	}
+	return ret;
+}
+
+/*
+ * Atomic notifier chain routines. Registration and unregistration
+ * use a spinlock, and call_chain is synchronized by RCU (no locks).
+ */
 
 /**
- * notifier_chain_register - Add notifier to a notifier chain
- * @list: Pointer to root list pointer
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
  * @n: New entry in notifier chain
  *
- * Adds a notifier to a notifier chain.
+ * Adds a notifier to an atomic notifier chain.
  *
  * Currently always returns zero.
  */
+
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+		struct notifier_block *n)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&nh->lock, flags);
+	ret = notifier_chain_register(&nh->head, n);
+	spin_unlock_irqrestore(&nh->lock, flags);
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
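+ *
+ * A minimal usage sketch, assuming the ATOMIC_NOTIFIER_HEAD initializer
+ * from this series (the chain, handler, and notifier_block names here
+ * are hypothetical, not part of this patch):
+ *
+ *	static int my_event(struct notifier_block *nb,
+ *			unsigned long val, void *data)
+ *	{
+ *		return NOTIFY_OK;	(must not block: may run in atomic context)
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_event };
+ *	static ATOMIC_NOTIFIER_HEAD(my_chain);
+ *
+ *	atomic_notifier_chain_register(&my_chain, &my_nb);
+ *	atomic_notifier_call_chain(&my_chain, 0, NULL);
+ *	atomic_notifier_chain_unregister(&my_chain, &my_nb);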
+ */ +int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&nh->lock, flags); + ret = notifier_chain_unregister(&nh->head, n); + spin_unlock_irqrestore(&nh->lock, flags); + synchronize_rcu(); + return ret; +} + +EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); + +/** + * atomic_notifier_call_chain - Call functions in an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * + * Calls each function in a notifier chain in turn. The functions + * run in an atomic context, so they must not block. + * This routine uses RCU to synchronize with changes to the chain. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ -int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) +int atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v) { - write_lock(¬ifier_lock); - while(*list) - { - if(n->priority > (*list)->priority) - break; - list= &((*list)->next); - } - n->next = *list; - *list=n; - write_unlock(¬ifier_lock); - return 0; + int ret; + + rcu_read_lock(); + ret = notifier_call_chain(&nh->head, val, v); + rcu_read_unlock(); + return ret; } -EXPORT_SYMBOL(notifier_chain_register); +EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); + +/* + * Blocking notifier chain routines. All access to the chain is + * synchronized by an rwsem. + */ /** - * notifier_chain_unregister - Remove notifier from a notifier chain - * @nl: Pointer to root list pointer + * blocking_notifier_chain_register - Add notifier to a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * - * Removes a notifier from a notifier chain. + * Adds a notifier to a blocking notifier chain. + * Must be called in process context. * - * Returns zero on success, or %-ENOENT on failure. + * Currently always returns zero. */ -int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) +int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *n) { - write_lock(¬ifier_lock); - while((*nl)!=NULL) - { - if((*nl)==n) - { - *nl=n->next; - write_unlock(¬ifier_lock); - return 0; - } - nl=&((*nl)->next); - } - write_unlock(¬ifier_lock); - return -ENOENT; + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_register(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_register(&nh->head, n); + up_write(&nh->rwsem); + return ret; +} + +EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); + +/** + * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a blocking notifier chain. + * Must be called from process context. + * + * Returns zero on success or %-ENOENT on failure. 
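+ *
+ * Ordering sketch (the handlers and priority values are hypothetical):
+ * notifier_chain_register() above keeps each chain sorted by descending
+ * priority, so given
+ *
+ *	static struct notifier_block nb_a = {
+ *		.notifier_call	= handler_a,
+ *		.priority	= 10,
+ *	};
+ *	static struct notifier_block nb_b = {
+ *		.notifier_call	= handler_b,
+ *		.priority	= 0,
+ *	};
+ *
+ * handler_a is called before handler_b by blocking_notifier_call_chain(),
+ * regardless of registration order.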
+ */ +int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_unregister(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_unregister(&nh->head, n); + up_write(&nh->rwsem); + return ret; } -EXPORT_SYMBOL(notifier_chain_unregister); +EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); /** - * notifier_call_chain - Call functions in a notifier chain - * @n: Pointer to root pointer of notifier chain + * blocking_notifier_call_chain - Call functions in a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * - * Calls each function in a notifier chain in turn. + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. * - * If the return value of the notifier can be and'd - * with %NOTIFY_STOP_MASK, then notifier_call_chain + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain * will return immediately, with the return value of * the notifier function which halted execution. - * Otherwise, the return value is the return value + * Otherwise the return value is the return value * of the last notifier function called. */ -int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) +int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v) { - int ret=NOTIFY_DONE; - struct notifier_block *nb = *n; + int ret; - while(nb) - { - ret=nb->notifier_call(nb,val,v); - if(ret&NOTIFY_STOP_MASK) - { - return ret; - } - nb=nb->next; - } + down_read(&nh->rwsem); + ret = notifier_call_chain(&nh->head, val, v); + up_read(&nh->rwsem); return ret; } -EXPORT_SYMBOL(notifier_call_chain); +EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); + +/* + * Raw notifier chain routines. There is no protection; + * the caller must provide it. Use at your own risk! + */ + +/** + * raw_notifier_chain_register - Add notifier to a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to a raw notifier chain. + * All locking must be provided by the caller. + * + * Currently always returns zero. + */ + +int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_register(&nh->head, n); +} + +EXPORT_SYMBOL_GPL(raw_notifier_chain_register); + +/** + * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a raw notifier chain. + * All locking must be provided by the caller. + * + * Returns zero on success or %-ENOENT on failure. 
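+ *
+ * Because no locking is done here, a user of a raw chain typically wraps
+ * these calls in a lock of its own; a sketch (the lock, chain, and
+ * notifier_block names are hypothetical):
+ *
+ *	static RAW_NOTIFIER_HEAD(my_chain);
+ *	static DEFINE_SPINLOCK(my_lock);
+ *
+ *	spin_lock(&my_lock);
+ *	raw_notifier_chain_unregister(&my_chain, &my_nb);
+ *	spin_unlock(&my_lock);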
+ */ +int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_unregister(&nh->head, n); +} + +EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); + +/** + * raw_notifier_call_chain - Call functions in a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * + * Calls each function in a notifier chain in turn. The functions + * run in an undefined context. + * All locking must be provided by the caller. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then raw_notifier_call_chain + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ + +int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v) +{ + return notifier_call_chain(&nh->head, val, v); +} + +EXPORT_SYMBOL_GPL(raw_notifier_call_chain); /** * register_reboot_notifier - Register function to be called at reboot time @@ -187,13 +404,13 @@ EXPORT_SYMBOL(notifier_call_chain); * Registers a function with the list of functions * to be called at reboot time. * - * Currently always returns zero, as notifier_chain_register + * Currently always returns zero, as blocking_notifier_chain_register * always returns zero. */ int register_reboot_notifier(struct notifier_block * nb) { - return notifier_chain_register(&reboot_notifier_list, nb); + return blocking_notifier_chain_register(&reboot_notifier_list, nb); } EXPORT_SYMBOL(register_reboot_notifier); @@ -210,90 +427,25 @@ EXPORT_SYMBOL(register_reboot_notifier); int unregister_reboot_notifier(struct notifier_block * nb) { - return notifier_chain_unregister(&reboot_notifier_list, nb); + return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); } EXPORT_SYMBOL(unregister_reboot_notifier); -asmlinkage long sys_ni_syscall(void) -{ - return -ENOSYS; -} - -cond_syscall(sys_nfsservctl) -cond_syscall(sys_quotactl) -cond_syscall(sys_acct) -cond_syscall(sys_lookup_dcookie) -cond_syscall(sys_swapon) -cond_syscall(sys_swapoff) -cond_syscall(sys_init_module) -cond_syscall(sys_delete_module) -cond_syscall(sys_socketpair) -cond_syscall(sys_bind) -cond_syscall(sys_listen) -cond_syscall(sys_accept) -cond_syscall(sys_connect) -cond_syscall(sys_getsockname) -cond_syscall(sys_getpeername) -cond_syscall(sys_sendto) -cond_syscall(sys_send) -cond_syscall(sys_recvfrom) -cond_syscall(sys_recv) -cond_syscall(sys_socket) -cond_syscall(sys_setsockopt) -cond_syscall(sys_getsockopt) -cond_syscall(sys_shutdown) -cond_syscall(sys_sendmsg) -cond_syscall(sys_recvmsg) -cond_syscall(sys_socketcall) -cond_syscall(sys_futex) -cond_syscall(compat_sys_futex) -cond_syscall(sys_epoll_create) -cond_syscall(sys_epoll_ctl) -cond_syscall(sys_epoll_wait) -cond_syscall(sys_semget) -cond_syscall(sys_semop) -cond_syscall(sys_semtimedop) -cond_syscall(sys_semctl) -cond_syscall(sys_msgget) -cond_syscall(sys_msgsnd) -cond_syscall(sys_msgrcv) -cond_syscall(sys_msgctl) -cond_syscall(sys_shmget) -cond_syscall(sys_shmdt) -cond_syscall(sys_shmctl) -cond_syscall(sys_mq_open) -cond_syscall(sys_mq_unlink) -cond_syscall(sys_mq_timedsend) -cond_syscall(sys_mq_timedreceive) -cond_syscall(sys_mq_notify) -cond_syscall(sys_mq_getsetattr) -cond_syscall(compat_sys_mq_open) -cond_syscall(compat_sys_mq_timedsend) -cond_syscall(compat_sys_mq_timedreceive) 
-cond_syscall(compat_sys_mq_notify) -cond_syscall(compat_sys_mq_getsetattr) -cond_syscall(sys_mbind) -cond_syscall(sys_get_mempolicy) -cond_syscall(sys_set_mempolicy) -cond_syscall(compat_get_mempolicy) - -/* arch-specific weak syscall entries */ -cond_syscall(sys_pciconfig_read) -cond_syscall(sys_pciconfig_write) -cond_syscall(sys_pciconfig_iobase) - static int set_one_prio(struct task_struct *p, int niceval, int error) { int no_nice; if (p->uid != current->euid && - p->uid != current->uid && !capable(CAP_SYS_NICE)) { + p->euid != current->euid && !capable(CAP_SYS_NICE)) { error = -EPERM; goto out; } - if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) { - error = -EACCES; + if (niceval < task_nice(p) && !can_nice(p, niceval)) { + if (vx_flags(VXF_IGNEG_NICE, 0)) + error = 0; + else + error = -EACCES; goto out; } no_nice = security_task_setnice(p, niceval); @@ -312,8 +464,6 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) { struct task_struct *g, *p; struct user_struct *user; - struct pid *pid; - struct list_head *l; int error = -EINVAL; if (which > 2 || which < 0) @@ -338,23 +488,24 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) case PRIO_PGRP: if (!who) who = process_group(current); - for_each_task_pid(who, PIDTYPE_PGID, p, l, pid) + do_each_task_pid(who, PIDTYPE_PGID, p) { error = set_one_prio(p, niceval, error); + } while_each_task_pid(who, PIDTYPE_PGID, p); break; case PRIO_USER: + user = current->user; if (!who) - user = current->user; + who = current->uid; else - user = find_user(vx_current_xid(), who); - - if (!user) - goto out_unlock; + if ((who != current->uid) && + !(user = find_user(vx_current_xid(), who))) + goto out_unlock; /* No processes for this user */ do_each_thread(g, p) if (p->uid == who) error = set_one_prio(p, niceval, error); while_each_thread(g, p); - if (who) + if (who != current->uid) free_uid(user); /* For find_user() */ break; } @@ -373,8 +524,6 @@ out: asmlinkage long sys_getpriority(int which, int who) { struct task_struct *g, *p; - struct list_head *l; - struct pid *pid; struct user_struct *user; long niceval, retval = -ESRCH; @@ -396,20 +545,20 @@ asmlinkage long sys_getpriority(int which, int who) case PRIO_PGRP: if (!who) who = process_group(current); - for_each_task_pid(who, PIDTYPE_PGID, p, l, pid) { + do_each_task_pid(who, PIDTYPE_PGID, p) { niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; - } + } while_each_task_pid(who, PIDTYPE_PGID, p); break; case PRIO_USER: + user = current->user; if (!who) - user = current->user; + who = current->uid; else - user = find_user(vx_current_xid(), who); - - if (!user) - goto out_unlock; + if ((who != current->uid) && + !(user = find_user(vx_current_xid(), who))) + goto out_unlock; /* No processes for this user */ do_each_thread(g, p) if (p->uid == who) { @@ -418,7 +567,7 @@ asmlinkage long sys_getpriority(int which, int who) retval = niceval; } while_each_thread(g, p); - if (who) + if (who != current->uid) free_uid(user); /* for find_user() */ break; } @@ -428,72 +577,105 @@ out_unlock: return retval; } -/* - * vshelper path is set via /proc/sys - * invoked by vserver sys_reboot(), with - * the following arguments - * - * argv [0] = vshelper_path; - * argv [1] = action: "restart", "halt", "poweroff", ... - * argv [2] = context identifier - * argv [3] = additional argument (restart2) +/** + * emergency_restart - reboot the system * - * envp [*] = type-specific parameters + * Without shutting down any hardware or taking any locks + * reboot the system. 
This is called when we know we are in + * trouble so this is our best effort to reboot. This is + * safe to call in interrupt context. */ -char vshelper_path[255] = "/sbin/vshelper"; - -long vs_reboot(unsigned int cmd, void * arg) +void emergency_restart(void) { - char id_buf[8], cmd_buf[32]; - char uid_buf[32], pid_buf[32]; - char buffer[256]; - - char *argv[] = {vshelper_path, NULL, id_buf, NULL, 0}; - char *envp[] = {"HOME=/", "TERM=linux", - "PATH=/sbin:/usr/sbin:/bin:/usr/bin", - uid_buf, pid_buf, cmd_buf, 0}; - - snprintf(id_buf, sizeof(id_buf)-1, "%d", vx_current_xid()); - - snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd); - snprintf(uid_buf, sizeof(uid_buf)-1, "VS_UID=%d", current->uid); - snprintf(pid_buf, sizeof(pid_buf)-1, "VS_PID=%d", current->pid); + machine_emergency_restart(); +} +EXPORT_SYMBOL_GPL(emergency_restart); - switch (cmd) { - case LINUX_REBOOT_CMD_RESTART: - argv[1] = "restart"; - break; +void kernel_restart_prepare(char *cmd) +{ + blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); + system_state = SYSTEM_RESTART; + device_shutdown(); +} - case LINUX_REBOOT_CMD_HALT: - argv[1] = "halt"; - break; +/** + * kernel_restart - reboot the system + * @cmd: pointer to buffer containing command to execute for restart + * or %NULL + * + * Shutdown everything and perform a clean reboot. + * This is not safe to call in interrupt context. + */ +void kernel_restart(char *cmd) +{ + kernel_restart_prepare(cmd); + if (!cmd) { + printk(KERN_EMERG "Restarting system.\n"); + } else { + printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); + } + printk(".\n"); + machine_restart(cmd); +} +EXPORT_SYMBOL_GPL(kernel_restart); - case LINUX_REBOOT_CMD_POWER_OFF: - argv[1] = "poweroff"; - break; +/** + * kernel_kexec - reboot the system + * + * Move into place and start executing a preloaded standalone + * executable. If nothing was preloaded return an error. + */ +void kernel_kexec(void) +{ +#ifdef CONFIG_KEXEC + struct kimage *image; + image = xchg(&kexec_image, NULL); + if (!image) { + return; + } + kernel_restart_prepare(NULL); + printk(KERN_EMERG "Starting new kernel\n"); + machine_shutdown(); + machine_kexec(image); +#endif +} +EXPORT_SYMBOL_GPL(kernel_kexec); - case LINUX_REBOOT_CMD_SW_SUSPEND: - argv[1] = "swsusp"; - break; +void kernel_shutdown_prepare(enum system_states state) +{ + blocking_notifier_call_chain(&reboot_notifier_list, + (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); + system_state = state; + device_shutdown(); +} +/** + * kernel_halt - halt the system + * + * Shutdown everything and perform a clean system halt. + */ +void kernel_halt(void) +{ + kernel_shutdown_prepare(SYSTEM_HALT); + printk(KERN_EMERG "System halted.\n"); + machine_halt(); +} - case LINUX_REBOOT_CMD_RESTART2: - if (strncpy_from_user(&buffer[0], (char *)arg, sizeof(buffer) - 1) < 0) - return -EFAULT; - argv[3] = buffer; - default: - argv[1] = "restart2"; - break; - } +EXPORT_SYMBOL_GPL(kernel_halt); - /* maybe we should wait ? */ - if (call_usermodehelper(*argv, argv, envp, 0)) { - printk( KERN_WARNING - "vs_reboot(): failed to exec (%s %s %s %s)\n", - vshelper_path, argv[1], argv[2], argv[3]); - return -EPERM; - } - return 0; +/** + * kernel_power_off - power_off the system + * + * Shutdown everything and perform a clean system power_off. 
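+ *
+ * Note: on the syscall path below, a power-off request degrades to a
+ * halt when no platform hook is registered:
+ *
+ *	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ *		cmd = LINUX_REBOOT_CMD_HALT;
+ *
+ * so sys_reboot() only reaches this function when pm_power_off is set.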
+ */ +void kernel_power_off(void) +{ + kernel_shutdown_prepare(SYSTEM_POWER_OFF); + printk(KERN_EMERG "Power down.\n"); + machine_power_off(); } +EXPORT_SYMBOL_GPL(kernel_power_off); + +long vs_reboot(unsigned int, void __user *); /* * Reboot system call: for obvious reasons only root may call it, @@ -519,17 +701,19 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user magic2 != LINUX_REBOOT_MAGIC2C)) return -EINVAL; + /* Instead of trying to make the power_off code look like + * halt when pm_power_off is not set do it the easy way. + */ + if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) + cmd = LINUX_REBOOT_CMD_HALT; + if (!vx_check(0, VX_ADMIN|VX_WATCH)) return vs_reboot(cmd, arg); lock_kernel(); switch (cmd) { case LINUX_REBOOT_CMD_RESTART: - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); - system_state = SYSTEM_RESTART; - device_shutdown(); - printk(KERN_EMERG "Restarting system.\n"); - machine_restart(NULL); + kernel_restart(NULL); break; case LINUX_REBOOT_CMD_CAD_ON: @@ -541,21 +725,13 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user break; case LINUX_REBOOT_CMD_HALT: - notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); - system_state = SYSTEM_HALT; - device_shutdown(); - printk(KERN_EMERG "System halted.\n"); - machine_halt(); + kernel_halt(); unlock_kernel(); do_exit(0); break; case LINUX_REBOOT_CMD_POWER_OFF: - notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); - system_state = SYSTEM_POWER_OFF; - device_shutdown(); - printk(KERN_EMERG "Power down.\n"); - machine_power_off(); + kernel_power_off(); unlock_kernel(); do_exit(0); break; @@ -567,13 +743,14 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user } buffer[sizeof(buffer) - 1] = '\0'; - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer); - system_state = SYSTEM_RESTART; - device_shutdown(); - printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer); - machine_restart(buffer); + kernel_restart(buffer); break; + case LINUX_REBOOT_CMD_KEXEC: + kernel_kexec(); + unlock_kernel(); + return -EINVAL; + #ifdef CONFIG_SOFTWARE_SUSPEND case LINUX_REBOOT_CMD_SW_SUSPEND: { @@ -593,8 +770,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user static void deferred_cad(void *dummy) { - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); - machine_restart(NULL); + kernel_restart(NULL); } /* @@ -663,8 +839,8 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) } if (new_egid != old_egid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } if (rgid != (gid_t) -1 || (egid != (gid_t) -1 && egid != old_rgid)) @@ -672,9 +848,8 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) current->fsgid = new_egid; current->egid = new_egid; current->gid = new_rgid; - - ckrm_cb_gid(); - + key_fsgid_changed(current); + proc_id_connector(current, PROC_EVENT_GID); return 0; } @@ -696,8 +871,8 @@ asmlinkage long sys_setgid(gid_t gid) { if(old_egid != gid) { - current->mm->dumpable=0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->gid = current->egid = current->sgid = current->fsgid = gid; } @@ -705,16 +880,16 @@ asmlinkage long sys_setgid(gid_t gid) { if(old_egid != gid) { - current->mm->dumpable=0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->egid = current->fsgid = gid; } else return -EPERM; - ckrm_cb_gid(); - + key_fsgid_changed(current); + 
proc_id_connector(current, PROC_EVENT_GID); return 0; } @@ -727,7 +902,7 @@ static int set_user(uid_t new_ruid, int dumpclear) return -EAGAIN; if (atomic_read(&new_user->processes) >= - current->rlim[RLIMIT_NPROC].rlim_cur && + current->signal->rlim[RLIMIT_NPROC].rlim_cur && new_user != &root_user) { free_uid(new_user); return -EAGAIN; @@ -737,8 +912,8 @@ static int set_user(uid_t new_ruid, int dumpclear) if(dumpclear) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->uid = new_ruid; return 0; @@ -794,8 +969,8 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) if (new_euid != old_euid) { - current->mm->dumpable=0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->fsuid = current->euid = new_euid; if (ruid != (uid_t) -1 || @@ -803,7 +978,8 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) current->suid = current->euid; current->fsuid = current->euid; - ckrm_cb_uid(); + key_fsuid_changed(current); + proc_id_connector(current, PROC_EVENT_UID); return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); } @@ -844,13 +1020,14 @@ asmlinkage long sys_setuid(uid_t uid) if (old_euid != uid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->fsuid = current->euid = uid; current->suid = new_suid; - ckrm_cb_uid(); + key_fsuid_changed(current); + proc_id_connector(current, PROC_EVENT_UID); return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); } @@ -889,8 +1066,8 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) if (euid != (uid_t) -1) { if (euid != current->euid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->euid = euid; } @@ -898,7 +1075,8 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) if (suid != (uid_t) -1) current->suid = suid; - ckrm_cb_uid(); + key_fsuid_changed(current); + proc_id_connector(current, PROC_EVENT_UID); return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); } @@ -939,8 +1117,8 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) if (egid != (gid_t) -1) { if (egid != current->egid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->egid = egid; } @@ -950,8 +1128,8 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) if (sgid != (gid_t) -1) current->sgid = sgid; - ckrm_cb_gid(); - + key_fsgid_changed(current); + proc_id_connector(current, PROC_EVENT_GID); return 0; } @@ -987,12 +1165,15 @@ asmlinkage long sys_setfsuid(uid_t uid) { if (uid != old_fsuid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->fsuid = uid; } + key_fsuid_changed(current); + proc_id_connector(current, PROC_EVENT_UID); + security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); return old_fsuid; @@ -1015,10 +1196,12 @@ asmlinkage long sys_setfsgid(gid_t gid) { if (gid != old_fsgid) { - current->mm->dumpable = 0; - wmb(); + current->mm->dumpable = suid_dumpable; + smp_wmb(); } current->fsgid = gid; + key_fsgid_changed(current); + proc_id_connector(current, PROC_EVENT_GID); } return old_fsgid; } @@ -1033,10 +1216,28 @@ asmlinkage long sys_times(struct tms __user * tbuf) */ if (tbuf) { struct tms tmp; - tmp.tms_utime = jiffies_to_clock_t(current->utime); - tmp.tms_stime = jiffies_to_clock_t(current->stime); - tmp.tms_cutime = 
jiffies_to_clock_t(current->cutime); - tmp.tms_cstime = jiffies_to_clock_t(current->cstime); + struct task_struct *tsk = current; + struct task_struct *t; + cputime_t utime, stime, cutime, cstime; + + spin_lock_irq(&tsk->sighand->siglock); + utime = tsk->signal->utime; + stime = tsk->signal->stime; + t = tsk; + do { + utime = cputime_add(utime, t->utime); + stime = cputime_add(stime, t->stime); + t = next_thread(t); + } while (t != tsk); + + cutime = tsk->signal->cutime; + cstime = tsk->signal->cstime; + spin_unlock_irq(&tsk->sighand->siglock); + + tmp.tms_utime = cputime_to_clock_t(utime); + tmp.tms_stime = cputime_to_clock_t(stime); + tmp.tms_cutime = cputime_to_clock_t(cutime); + tmp.tms_cstime = cputime_to_clock_t(cstime); if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) return -EFAULT; } @@ -1059,15 +1260,19 @@ asmlinkage long sys_times(struct tms __user * tbuf) asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) { struct task_struct *p; + struct task_struct *group_leader = current->group_leader; + pid_t rpgid; int err = -EINVAL; if (!pid) - pid = current->pid; + pid = vx_map_pid(group_leader->pid); if (!pgid) pgid = pid; if (pgid < 0) return -EINVAL; + rpgid = vx_rmap_pid(pgid); + /* From this point forward we keep holding onto the tasklist lock * so that our parent does not change from under us. -DaveM */ @@ -1082,16 +1287,16 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) if (!thread_group_leader(p)) goto out; - if (p->parent == current || p->real_parent == current) { + if (p->real_parent == group_leader) { err = -EPERM; - if (p->signal->session != current->signal->session) + if (p->signal->session != group_leader->signal->session) goto out; err = -EACCES; if (p->did_exec) goto out; } else { err = -ESRCH; - if (p != current) + if (p != group_leader) goto out; } @@ -1101,24 +1306,23 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) if (pgid != pid) { struct task_struct *p; - struct pid *pid; - struct list_head *l; - for_each_task_pid(pgid, PIDTYPE_PGID, p, l, pid) - if (p->signal->session == current->signal->session) + do_each_task_pid(rpgid, PIDTYPE_PGID, p) { + if (p->signal->session == group_leader->signal->session) goto ok_pgid; + } while_each_task_pid(rpgid, PIDTYPE_PGID, p); goto out; } ok_pgid: - err = security_task_setpgid(p, pgid); + err = security_task_setpgid(p, rpgid); if (err) goto out; - if (process_group(p) != pgid) { + if (process_group(p) != rpgid) { detach_pid(p, PIDTYPE_PGID); - p->signal->pgrp = pgid; - attach_pid(p, PIDTYPE_PGID, pgid); + p->signal->pgrp = rpgid; + attach_pid(p, PIDTYPE_PGID, rpgid); } err = 0; @@ -1131,7 +1335,7 @@ out: asmlinkage long sys_getpgid(pid_t pid) { if (!pid) { - return process_group(current); + return vx_rmap_pid(process_group(current)); } else { int retval; struct task_struct *p; @@ -1143,7 +1347,7 @@ asmlinkage long sys_getpgid(pid_t pid) if (p) { retval = security_task_getpgid(p); if (!retval) - retval = process_group(p); + retval = vx_rmap_pid(process_group(p)); } read_unlock(&tasklist_lock); return retval; @@ -1184,25 +1388,36 @@ asmlinkage long sys_getsid(pid_t pid) asmlinkage long sys_setsid(void) { - struct pid *pid; + struct task_struct *group_leader = current->group_leader; + pid_t session; int err = -EPERM; - if (!thread_group_leader(current)) - return -EINVAL; - + mutex_lock(&tty_mutex); write_lock_irq(&tasklist_lock); - pid = find_pid(PIDTYPE_PGID, current->pid); - if (pid) + /* Fail if I am already a session leader */ + if (group_leader->signal->leader) + goto out; + + session = group_leader->pid; + /* Fail 
if a process group id already exists that equals the + * proposed session id. + * + * Don't check if session id == 1 because kernel threads use this + * session id and so the check will always fail and make it so + * init cannot successfully call setsid. + */ + if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session)) goto out; - current->signal->leader = 1; - __set_special_pids(current->pid, current->pid); - current->signal->tty = NULL; - current->signal->tty_old_pgrp = 0; - err = process_group(current); + group_leader->signal->leader = 1; + __set_special_pids(session, session); + group_leader->signal->tty = NULL; + group_leader->signal->tty_old_pgrp = 0; + err = process_group(group_leader); out: write_unlock_irq(&tasklist_lock); + mutex_unlock(&tty_mutex); return err; } @@ -1304,7 +1519,7 @@ static int groups_from_user(struct group_info *group_info, return 0; } -/* a simple shell-metzner sort */ +/* a simple Shell sort */ static void groups_sort(struct group_info *group_info) { int base, max, stride; @@ -1334,9 +1549,9 @@ static void groups_sort(struct group_info *group_info) } /* a simple bsearch */ -static int groups_search(struct group_info *group_info, gid_t grp) +int groups_search(struct group_info *group_info, gid_t grp) { - int left, right; + unsigned int left, right; if (!group_info) return 0; @@ -1344,7 +1559,7 @@ static int groups_search(struct group_info *group_info, gid_t grp) left = 0; right = group_info->ngroups; while (left < right) { - int mid = (left+right)/2; + unsigned int mid = (left+right)/2; int cmp = grp - GROUP_AT(group_info, mid); if (cmp > 0) left = mid + 1; @@ -1394,7 +1609,6 @@ asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) return -EINVAL; /* no need to grab task_lock here; it cannot change */ - get_group_info(current->group_info); i = current->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { @@ -1407,7 +1621,6 @@ asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) } } out: - put_group_info(current->group_info); return i; } @@ -1448,9 +1661,7 @@ int in_group_p(gid_t grp) { int retval = 1; if (grp != current->fsgid) { - get_group_info(current->group_info); retval = groups_search(current->group_info, grp); - put_group_info(current->group_info); } return retval; } @@ -1461,9 +1672,7 @@ int in_egroup_p(gid_t grp) { int retval = 1; if (grp != current->egid) { - get_group_info(current->group_info); retval = groups_search(current->group_info, grp); - put_group_info(current->group_info); } return retval; } @@ -1490,7 +1699,7 @@ asmlinkage long sys_sethostname(char __user *name, int len) int errno; char tmp[__NEW_UTS_LEN]; - if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; @@ -1539,7 +1748,7 @@ asmlinkage long sys_setdomainname(char __user *name, int len) int errno; char tmp[__NEW_UTS_LEN]; - if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME)) + if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME)) return -EPERM; if (len < 0 || len > __NEW_UTS_LEN) return -EINVAL; @@ -1561,9 +1770,13 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) { if (resource >= RLIM_NLIMITS) return -EINVAL; - else - return copy_to_user(rlim, current->rlim + resource, sizeof(*rlim)) - ? 
-EFAULT : 0; + else { + struct rlimit value; + task_lock(current->group_leader); + value = current->signal->rlim[resource]; + task_unlock(current->group_leader); + return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; + } } #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT @@ -1578,7 +1791,9 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r if (resource >= RLIM_NLIMITS) return -EINVAL; - memcpy(&x, current->rlim + resource, sizeof(*rlim)); + task_lock(current->group_leader); + x = current->signal->rlim[resource]; + task_unlock(current->group_leader); if(x.rlim_cur > 0x7FFFFFFF) x.rlim_cur = 0x7FFFFFFF; if(x.rlim_max > 0x7FFFFFFF) @@ -1591,29 +1806,64 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; + unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; - if(copy_from_user(&new_rlim, rlim, sizeof(*rlim))) + if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; - if (new_rlim.rlim_cur > new_rlim.rlim_max) - return -EINVAL; - old_rlim = current->rlim + resource; - if (((new_rlim.rlim_cur > old_rlim->rlim_max) || - (new_rlim.rlim_max > old_rlim->rlim_max)) && - !capable(CAP_SYS_RESOURCE) && vx_ccaps(VXC_SET_RLIMIT)) + if (new_rlim.rlim_cur > new_rlim.rlim_max) + return -EINVAL; + old_rlim = current->signal->rlim + resource; + if ((new_rlim.rlim_max > old_rlim->rlim_max) && + !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT)) + return -EPERM; + if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) return -EPERM; - if (resource == RLIMIT_NOFILE) { - if (new_rlim.rlim_cur > NR_OPEN || new_rlim.rlim_max > NR_OPEN) - return -EPERM; - } retval = security_task_setrlimit(resource, &new_rlim); if (retval) return retval; + task_lock(current->group_leader); *old_rlim = new_rlim; + task_unlock(current->group_leader); + + if (resource != RLIMIT_CPU) + goto out; + + /* + * RLIMIT_CPU handling. Note that the kernel fails to return an error + * code if it rejected the user's attempt to set RLIMIT_CPU. This is a + * very long-standing error, and fixing it now risks breakage of + * applications, so we live with it + */ + if (new_rlim.rlim_cur == RLIM_INFINITY) + goto out; + + it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); + if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { + unsigned long rlim_cur = new_rlim.rlim_cur; + cputime_t cputime; + + if (rlim_cur == 0) { + /* + * The caller is asking for an immediate RLIMIT_CPU + * expiry. But we use the zero value to mean "it was + * never set". So let's cheat and make it one second + * instead + */ + rlim_cur = 1; + } + cputime = secs_to_cputime(rlim_cur); + read_lock(&tasklist_lock); + spin_lock_irq(¤t->sighand->siglock); + set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); + spin_unlock_irq(¤t->sighand->siglock); + read_unlock(&tasklist_lock); + } +out: return 0; } @@ -1625,44 +1875,105 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) * a lot simpler! (Which we're not doing right now because we're not * measuring them yet). * - * This is SMP safe. Either we are called from sys_getrusage on ourselves - * below (we know we aren't going to exit/disappear and only we change our - * rusage counters), or we are called from wait4() on a process which is - * either stopped or zombied. 
In the zombied case the task won't get - * reaped till shortly after the call to getrusage(), in both cases the - * task being examined is in a frozen state so the counters won't change. + * When sampling multiple threads for RUSAGE_SELF, under SMP we might have + * races with threads incrementing their own counters. But since word + * reads are atomic, we either get new values or old values and we don't + * care which for the sums. We always take the siglock to protect reading + * the c* fields from p->signal from races with exit.c updating those + * fields when reaping, so a sample either gets all the additions of a + * given child after it's reaped, or none so this sample is before reaping. + * + * tasklist_lock locking optimisation: + * If we are current and single threaded, we do not need to take the tasklist + * lock or the siglock. No one else can take our signal_struct away, + * no one else can reap the children to update signal->c* counters, and + * no one else can race with the signal-> fields. + * If we do not take the tasklist_lock, the signal-> fields could be read + * out of order while another thread was just exiting. So we place a + * read memory barrier when we avoid the lock. On the writer side, + * write memory barrier is implied in __exit_signal as __exit_signal releases + * the siglock spinlock after updating the signal-> fields. + * + * We don't really need the siglock when we access the non c* fields + * of the signal_struct (for RUSAGE_SELF) even in multithreaded + * case, since we take the tasklist lock for read and the non c* signal-> + * fields are updated only in __exit_signal, which is called with + * tasklist_lock taken for write, hence these two threads cannot execute + * concurrently. + * */ -int getrusage(struct task_struct *p, int who, struct rusage __user *ru) + +static void k_getrusage(struct task_struct *p, int who, struct rusage *r) { - struct rusage r; + struct task_struct *t; + unsigned long flags; + cputime_t utime, stime; + int need_lock = 0; + + memset((char *) r, 0, sizeof *r); + utime = stime = cputime_zero; + + if (p != current || !thread_group_empty(p)) + need_lock = 1; + + if (need_lock) { + read_lock(&tasklist_lock); + if (unlikely(!p->signal)) { + read_unlock(&tasklist_lock); + return; + } + } else + /* See locking comments above */ + smp_rmb(); - memset((char *) &r, 0, sizeof(r)); switch (who) { - case RUSAGE_SELF: - jiffies_to_timeval(p->utime, &r.ru_utime); - jiffies_to_timeval(p->stime, &r.ru_stime); - r.ru_nvcsw = p->nvcsw; - r.ru_nivcsw = p->nivcsw; - r.ru_minflt = p->min_flt; - r.ru_majflt = p->maj_flt; - break; + case RUSAGE_BOTH: case RUSAGE_CHILDREN: - jiffies_to_timeval(p->cutime, &r.ru_utime); - jiffies_to_timeval(p->cstime, &r.ru_stime); - r.ru_nvcsw = p->cnvcsw; - r.ru_nivcsw = p->cnivcsw; - r.ru_minflt = p->cmin_flt; - r.ru_majflt = p->cmaj_flt; + spin_lock_irqsave(&p->sighand->siglock, flags); + utime = p->signal->cutime; + stime = p->signal->cstime; + r->ru_nvcsw = p->signal->cnvcsw; + r->ru_nivcsw = p->signal->cnivcsw; + r->ru_minflt = p->signal->cmin_flt; + r->ru_majflt = p->signal->cmaj_flt; + spin_unlock_irqrestore(&p->sighand->siglock, flags); + + if (who == RUSAGE_CHILDREN) + break; + + case RUSAGE_SELF: + utime = cputime_add(utime, p->signal->utime); + stime = cputime_add(stime, p->signal->stime); + r->ru_nvcsw += p->signal->nvcsw; + r->ru_nivcsw += p->signal->nivcsw; + r->ru_minflt += p->signal->min_flt; + r->ru_majflt += p->signal->maj_flt; + t = p; + do { + utime = cputime_add(utime, t->utime); + stime = 
cputime_add(stime, t->stime); + r->ru_nvcsw += t->nvcsw; + r->ru_nivcsw += t->nivcsw; + r->ru_minflt += t->min_flt; + r->ru_majflt += t->maj_flt; + t = next_thread(t); + } while (t != p); break; + default: - jiffies_to_timeval(p->utime + p->cutime, &r.ru_utime); - jiffies_to_timeval(p->stime + p->cstime, &r.ru_stime); - r.ru_nvcsw = p->nvcsw + p->cnvcsw; - r.ru_nivcsw = p->nivcsw + p->cnivcsw; - r.ru_minflt = p->min_flt + p->cmin_flt; - r.ru_majflt = p->maj_flt + p->cmaj_flt; - break; + BUG(); } + + if (need_lock) + read_unlock(&tasklist_lock); + cputime_to_timeval(utime, &r->ru_utime); + cputime_to_timeval(stime, &r->ru_stime); +} + +int getrusage(struct task_struct *p, int who, struct rusage __user *ru) +{ + struct rusage r; + k_getrusage(p, who, &r); return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } @@ -1682,8 +1993,7 @@ asmlinkage long sys_umask(int mask) asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { - int error; - int sig; + long error; error = security_task_prctl(option, arg2, arg3, arg4, arg5); if (error) @@ -1691,22 +2001,20 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, switch (option) { case PR_SET_PDEATHSIG: - sig = arg2; - if (sig < 0 || sig > _NSIG) { + if (!valid_signal(arg2)) { error = -EINVAL; break; } - current->pdeath_signal = sig; + current->pdeath_signal = arg2; break; case PR_GET_PDEATHSIG: error = put_user(current->pdeath_signal, (int __user *)arg2); break; case PR_GET_DUMPABLE: - if (current->mm->dumpable) - error = 1; + error = current->mm->dumpable; break; case PR_SET_DUMPABLE: - if (arg2 != 0 && arg2 != 1) { + if (arg2 < 0 || arg2 > 1) { error = -EINVAL; break; } @@ -1752,6 +2060,26 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, } current->keep_capabilities = arg2; break; + case PR_SET_NAME: { + struct task_struct *me = current; + unsigned char ncomm[sizeof(me->comm)]; + + ncomm[sizeof(me->comm)-1] = 0; + if (strncpy_from_user(ncomm, (char __user *)arg2, + sizeof(me->comm)-1) < 0) + return -EFAULT; + set_task_comm(me, ncomm); + return 0; + } + case PR_GET_NAME: { + struct task_struct *me = current; + unsigned char tcomm[sizeof(me->comm)]; + + get_task_comm(tcomm, me); + if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm))) + return -EFAULT; + return 0; + } default: error = -EINVAL; break;
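
The PR_SET_NAME/PR_GET_NAME cases added above are driven from userspace
through plain prctl(2) calls. A minimal sketch (error handling elided;
the fallback defines are only needed where <sys/prctl.h> predates this
patch -- the values 15 and 16 match include/linux/prctl.h):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_NAME
	#define PR_SET_NAME	15
	#define PR_GET_NAME	16
	#endif

	int main(void)
	{
		char comm[16];	/* sizeof(task_struct.comm) */

		/* longer names are silently truncated to 15 chars + NUL */
		prctl(PR_SET_NAME, "demo-worker");
		prctl(PR_GET_NAME, comm);
		printf("comm = %s\n", comm);	/* prints "demo-worker" */
		return 0;
	}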