/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;
/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and so forth.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 * Notifier chain core routines. The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = rcu_dereference(nb->next);
	}
	return ret;
}
/*
 * Atomic notifier chain routines. Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */
/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
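/*
 * Example (editor's illustrative sketch, not part of the original file;
 * all "example_*" names are hypothetical): declaring a chain, a notifier
 * block, and wiring them together with the routines above.
 */
#if 0
static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_notify(struct notifier_block *nb,
		unsigned long val, void *data)
{
	/* must not block: the chain is walked under rcu_read_lock() */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_notify,
	.priority	= 0,	/* higher-priority entries run first */
};

/* in init code:      atomic_notifier_chain_register(&example_chain, &example_nb); */
/* to fire an event:  atomic_notifier_call_chain(&example_chain, 0, NULL);         */
#endif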
/*
 * Blocking notifier chain routines. All access to the chain is
 * synchronized by an rwsem.
 */
/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled. At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled. At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	down_read(&nh->rwsem);
	ret = notifier_call_chain(&nh->head, val, v);
	up_read(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
 * Raw notifier chain routines. There is no protection;
 * the caller must provide it. Use at your own risk!
 */
/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */

int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
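/*
 * Example (editor's illustrative sketch, not part of the original file;
 * "example_*" names are hypothetical): a module hooking the shutdown path
 * via the reboot notifier list.
 */
#if 0
static int example_reboot_event(struct notifier_block *nb,
		unsigned long code, void *cmd)
{
	/* code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; cmd may be NULL */
	printk(KERN_INFO "example: shutting down, code %lu\n", code);
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

/* module init:  register_reboot_notifier(&example_reboot_nb);   */
/* module exit:  unregister_reboot_notifier(&example_reboot_nb); */
#endif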
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		if (vx_flags(VXF_IGNEG_NICE, 0))
			error = 0;
		else
			error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else if ((who != current->uid) &&
				 !(user = find_user(vx_current_xid(), who)))
				goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else if ((who != current->uid) &&
				 !(user = find_user(vx_current_xid(), who)))
				goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
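/*
 * Worked example of the offset described above (editor's note): a task at
 * nice -20 is reported as 20 - (-20) = 40, nice 0 as 20, and nice 19 as 1,
 * so every success is positive and negative values stay free for errors.
 * Userspace wrappers such as glibc's getpriority() convert back by
 * computing 20 - return_value.
 */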
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}
/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *       or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
EXPORT_SYMBOL_GPL(kernel_kexec);
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}

EXPORT_SYMBOL_GPL(kernel_power_off);
long vs_reboot(unsigned int, void __user *);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	if (!vx_check(0, VX_ADMIN|VX_WATCH))
		return vs_reboot(cmd, arg);

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;
	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;
	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;
	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;
	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;
	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);
		break;
	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;
#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
	{
		int ret = software_suspend();
		unlock_kernel();
		return ret;
	}
#endif
	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
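/*
 * Example (userspace sketch by the editor, not part of this file): a
 * setgid program permanently dropping its elevated group under the rules
 * documented above. Because the real gid is (re)set, the saved gid is
 * also replaced, so the old effective gid cannot be regained later:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) < 0)
 *		exit(1);
 */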
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(vx_current_xid(), new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
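/*
 * Example (userspace sketch by the editor, not part of this file): the
 * BSD-style swap mentioned above. A setuid-root program can park its
 * privileged uid in the real uid with setreuid() and later swap back,
 * which plain setuid() as implemented here cannot do:
 *
 *	setreuid(geteuid(), getuid());	(swap real and effective uid)
 *	... do work with reduced effective privileges ...
 *	setreuid(geteuid(), getuid());	(swap them back)
 */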
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}
/*
 * The same, but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually it's
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
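/*
 * Example (userspace sketch by the editor, not part of this file): the
 * tms fields and the return value are expressed in clock ticks; divide
 * by sysconf(_SC_CLK_TCK) to convert to seconds:
 *
 *	struct tms t;
 *	clock_t since_boot = times(&t);
 *	double user_sec = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */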
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	pid_t rpgid;
	int err = -EINVAL;

	if (!pid)
		pid = vx_map_pid(group_leader->pid);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	rpgid = vx_rmap_pid(pgid);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;
	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;
	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (p->signal->session != group_leader->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}
	err = -EPERM;
	if (p->signal->leader)
		goto out;
	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
				goto ok_pgid;
		} while_each_task_pid(rpgid, PIDTYPE_PGID, p);
		goto out;
	}
ok_pgid:
	err = security_task_setpgid(p, rpgid);
	if (err)
		goto out;
	if (process_group(p) != rpgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = rpgid;
		attach_pid(p, PIDTYPE_PGID, rpgid);
	}
	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return vx_rmap_pid(process_group(current));
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = vx_rmap_pid(process_group(p));
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return current->signal->session;
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	mutex_lock(&tty_mutex);
	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	mutex_unlock(&tty_mutex);
	return err;
}
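/*
 * Example (userspace sketch by the editor, not part of this file): the
 * classic first step of daemonizing. The forked child is not a process
 * group leader, so its setsid() succeeds, making it a session leader
 * with no controlling tty (signal->tty is cleared above):
 *
 *	if (fork() > 0)
 *		exit(0);
 *	if (setsid() < 0)
 *		exit(1);
 */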
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
		struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
		gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
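/*
 * Worked example of the two-level layout used above (editor's note,
 * assuming 4K pages and 32-bit gid_t, i.e. NGROUPS_PER_BLOCK == 1024):
 * gid number 2500 of a large set lives in blocks[2] at offset
 * 2500 - 2*1024 = 452. Sets of at most NGROUPS_SMALL entries skip the
 * per-block page allocations entirely and use small_block.
 */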
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);
asmlinkage long sys_newuname(struct new_utsname __user *name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, vx_new_utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(nodename);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;
	char *ptr;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	ptr = vx_new_uts(nodename);
	i = 1 + strlen(ptr);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, ptr, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * sys_newuname().
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(domainname);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it.
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead.
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
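/*
 * Example (userspace sketch by the editor, not part of this file): giving
 * the whole process one minute of CPU time. The soft limit is delivered
 * as SIGXCPU via the CPUCLOCK_PROF timer armed above:
 *
 *	struct rlimit rl = { .rlim_cur = 60, .rlim_max = 120 };
 *	if (setrlimit(RLIMIT_CPU, &rl) < 0)
 *		exit(1);
 */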
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * tasklist_lock locking optimisation:
 * If we are current and single threaded, we do not need to take the tasklist
 * lock or the siglock. No one else can take our signal_struct away,
 * no one else can reap the children to update signal->c* counters, and
 * no one else can race with the signal-> fields.
 * If we do not take the tasklist_lock, the signal-> fields could be read
 * out of order while another thread was just exiting. So we place a
 * read memory barrier when we avoid the lock. On the writer side,
 * the write memory barrier is implied in __exit_signal as __exit_signal
 * releases the siglock spinlock after updating the signal-> fields.
 *
 * We don't really need the siglock when we access the non c* fields
 * of the signal_struct (for RUSAGE_SELF) even in the multithreaded
 * case, since we take the tasklist lock for read and the non c* signal->
 * fields are updated only in __exit_signal, which is called with
 * tasklist_lock taken for write, hence these two threads cannot execute
 * concurrently.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	int need_lock = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (p != current || !thread_group_empty(p))
		need_lock = 1;

	if (need_lock) {
		read_lock(&tasklist_lock);
		if (unlikely(!p->signal)) {
			read_unlock(&tasklist_lock);
			return;
		}
	} else
		/* See locking comments above */
		smp_rmb();

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	if (need_lock)
		read_unlock(&tasklist_lock);
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
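/*
 * Worked example (editor's note): umask(022) returns the previous mask and
 * makes a later creat(path, 0666) produce 0666 & ~022 = 0644. Only the
 * permission bits (S_IRWXUGO == 0777) are kept by the xchg() above; any
 * other bits in the argument are silently discarded.
 */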
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))