/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/vs_cvirt.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;
/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 * Notifier chain core routines. The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
	}
	return ret;
}
/*
 * Atomic notifier chain routines. Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
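/*
 * Illustrative sketch (not part of the original file): how a client
 * typically uses an atomic notifier chain.  The chain head, callback
 * and event value below are hypothetical.
 *
 *	static ATOMIC_NOTIFIER_HEAD(example_chain);
 *
 *	static int example_event(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		// must not block; return NOTIFY_STOP to halt the chain
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_event,
 *		.priority	= 0,	// higher priority runs earlier
 *	};
 *
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, 0, NULL);
 */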
/*
 * Blocking notifier chain routines. All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled. At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled. At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	down_read(&nh->rwsem);
	ret = notifier_call_chain(&nh->head, val, v);
	up_read(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
 * Raw notifier chain routines. There is no protection;
 * the caller must provide it. Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */
int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
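/*
 * Illustrative sketch (not part of the original file): a driver that
 * needs to quiesce hardware before reboot hooks the chain above.  The
 * names below are hypothetical.
 *
 *	static int example_reboot_event(struct notifier_block *nb,
 *			unsigned long code, void *cmd)
 *	{
 *		// code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF,
 *		// as posted by kernel_restart_prepare() and
 *		// kernel_shutdown_prepare() below
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot_event,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 *	...
 *	unregister_reboot_notifier(&example_reboot_nb);
 */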
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		if (vx_flags(VXF_IGNEG_NICE, 0))
			error = 0;
		else
			error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) &&
					!(user = find_user(vx_current_xid(), who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) &&
					!(user = find_user(vx_current_xid(), who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
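/*
 * Illustrative sketch (not part of the original file): because of the
 * offset described above, a raw caller must decode the result itself:
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - ret;	// ret is 40..1, nice is -20..19
 *
 * (glibc's getpriority() wrapper performs this conversion internally.)
 */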
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *       or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
long vs_reboot(unsigned int, void __user *);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	if (!vx_check(0, VX_ADMIN|VX_WATCH))
		return vs_reboot(cmd, arg);

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
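/*
 * Illustrative sketch (not part of the original file): the magic-number
 * check above is what a userspace caller must satisfy:
 *
 *	sync();		// sys_reboot does not sync, see comment above
 *	reboot(LINUX_REBOOT_CMD_RESTART);	// glibc supplies the magics
 *
 * or, via the raw system call:
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_POWER_OFF, NULL);
 */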
static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
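/*
 * Illustrative sketch (not part of the original file): a setgid program
 * permanently dropping its privileges as described above.  Setting the
 * real gid also resets the saved gid to the new effective gid:
 *
 *	gid_t rgid = getgid();
 *
 *	if (setregid(rgid, rgid) < 0)
 *		abort();	// refuse to run half-privileged
 */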
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(vx_current_xid(), new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
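/*
 * Illustrative sketch (not part of the original file): the BSD-style
 * swap described above, where a program temporarily drops privileges
 * and regains them by exchanging the real and effective uid:
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *
 *	setreuid(euid, ruid);	// drop: run as the real user
 *	// ... do unprivileged work ...
 *	setreuid(ruid, euid);	// regain the original effective uid
 */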
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}
/*
 * Same as above, but for the fsgid. (The original comment here was the
 * Swedish "Samma på svenska.." - "The same in Swedish..")
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
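/*
 * Illustrative sketch (not part of the original file): userspace
 * converts the clock_t values returned above with sysconf:
 *
 *	struct tms t;
 *	clock_t now = times(&t);	// ticks since an arbitrary epoch
 *	long hz = sysconf(_SC_CLK_TCK);
 *	// t.tms_utime / hz = user CPU seconds consumed so far
 */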
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	pid_t rpgid;
	int err = -EINVAL;

	if (!pid)
		pid = vx_map_pid(group_leader->pid);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	rpgid = vx_rmap_pid(pgid);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->parent == group_leader) {
		err = -EPERM;
		if (p->signal->session != group_leader->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
				goto ok_pgid;
		} while_each_task_pid(rpgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, rpgid);
	if (err)
		goto out;

	if (process_group(p) != rpgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = rpgid;
		attach_pid(p, PIDTYPE_PGID, rpgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return vx_rmap_pid(process_group(current));
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = vx_rmap_pid(process_group(p));
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return current->signal->session;
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
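/*
 * Illustrative sketch (not part of the original file): the usual
 * two-call pattern from userspace - ask for the count first, then
 * fetch the list (gidsetsize == 0 copies nothing, see above):
 *
 *	int n = getgroups(0, NULL);
 *	gid_t *list = malloc(n * sizeof(gid_t));
 *	getgroups(n, list);
 */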
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		retval = groups_search(current->group_info, grp);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, vx_new_utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(nodename);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;
	char *ptr;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	ptr = vx_new_uts(nodename);
	i = 1 + strlen(ptr);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, ptr, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
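/*
 * Illustrative sketch (not part of the original file): sethostname()
 * takes an explicit length and the name need not be NUL-terminated:
 *
 *	const char host[] = "example";
 *	sethostname(host, strlen(host));
 *
 *	char buf[__NEW_UTS_LEN + 1];	// room for the trailing NUL
 *	gethostname(buf, sizeof(buf));
 */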
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(domainname);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
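/*
 * Illustrative sketch (not part of the original file): limiting a
 * process to ten CPU seconds; per the comment above, a soft limit of
 * zero is quietly treated as one second rather than rejected:
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *
 *	if (setrlimit(RLIMIT_CPU, &rl) < 0)
 *		perror("setrlimit");
 *	// SIGXCPU at the 10s soft limit, SIGKILL at the 20s hard limit
 */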
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
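/*
 * Illustrative sketch (not part of the original file): sampling our own
 * usage from userspace; RUSAGE_BOTH is kernel-internal and is rejected
 * by the check above:
 *
 *	struct rusage ru;
 *
 *	getrusage(RUSAGE_SELF, &ru);
 *	// ru.ru_utime / ru.ru_stime: user/system time as struct timeval
 *	// ru.ru_minflt / ru.ru_majflt: minor/major page fault counts
 */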
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}