4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/module.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/smp_lock.h>
13 #include <linux/notifier.h>
14 #include <linux/kmod.h>
15 #include <linux/reboot.h>
16 #include <linux/prctl.h>
17 #include <linux/init.h>
18 #include <linux/highuid.h>
20 #include <linux/kernel.h>
21 #include <linux/kexec.h>
22 #include <linux/workqueue.h>
23 #include <linux/capability.h>
24 #include <linux/device.h>
25 #include <linux/key.h>
26 #include <linux/times.h>
27 #include <linux/posix-timers.h>
28 #include <linux/security.h>
29 #include <linux/dcookies.h>
30 #include <linux/suspend.h>
31 #include <linux/tty.h>
32 #include <linux/signal.h>
33 #include <linux/cn_proc.h>
34 #include <linux/vs_base.h>
35 #include <linux/vs_cvirt.h>
37 #include <linux/compat.h>
38 #include <linux/syscalls.h>
39 #include <linux/kprobes.h>
41 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
45 #ifndef SET_UNALIGN_CTL
46 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
48 #ifndef GET_UNALIGN_CTL
49 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
52 # define SET_FPEMU_CTL(a,b) (-EINVAL)
55 # define GET_FPEMU_CTL(a,b) (-EINVAL)
58 # define SET_FPEXC_CTL(a,b) (-EINVAL)
61 # define GET_FPEXC_CTL(a,b) (-EINVAL)
65 * this is where the system-wide overflow UID and GID are defined, for
66 * architectures that now have 32-bit UID/GID but didn't in the past
69 int overflowuid = DEFAULT_OVERFLOWUID;
70 int overflowgid = DEFAULT_OVERFLOWGID;
73 EXPORT_SYMBOL(overflowuid);
74 EXPORT_SYMBOL(overflowgid);
78 * the same as above, but for filesystems which can only store a 16-bit
79 * UID and GID. As such, this is needed on all architectures
82 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
83 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
85 EXPORT_SYMBOL(fs_overflowuid);
86 EXPORT_SYMBOL(fs_overflowgid);
89 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
96 * Notifier list for kernel code which wants to be called
97 * at shutdown. This is used to stop any idling DMA operations
101 static struct notifier_block *reboot_notifier_list;
102 static DEFINE_RWLOCK(notifier_lock);
105 * notifier_chain_register - Add notifier to a notifier chain
106 * @list: Pointer to root list pointer
107 * @n: New entry in notifier chain
109 * Adds a notifier to a notifier chain.
111 * Currently always returns zero.
114 int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
116 write_lock(&notifier_lock);
119 if(n->priority > (*list)->priority)
121 list= &((*list)->next);
125 write_unlock(&notifier_lock);
129 EXPORT_SYMBOL(notifier_chain_register);
132 * notifier_chain_unregister - Remove notifier from a notifier chain
133 * @nl: Pointer to root list pointer
134 * @n: New entry in notifier chain
136 * Removes a notifier from a notifier chain.
138 * Returns zero on success, or %-ENOENT on failure.
141 int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
143 write_lock(&notifier_lock);
149 write_unlock(&notifier_lock);
154 write_unlock(&notifier_lock);
158 EXPORT_SYMBOL(notifier_chain_unregister);
161 * notifier_call_chain - Call functions in a notifier chain
162 * @n: Pointer to root pointer of notifier chain
163 * @val: Value passed unmodified to notifier function
164 * @v: Pointer passed unmodified to notifier function
166 * Calls each function in a notifier chain in turn.
168 * If the return value of the notifier can be and'd
169 * with %NOTIFY_STOP_MASK, then notifier_call_chain
170 * will return immediately, with the return value of
171 * the notifier function which halted execution.
172 * Otherwise, the return value is the return value
173 * of the last notifier function called.
176 int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
179 struct notifier_block *nb = *n;
183 ret=nb->notifier_call(nb,val,v);
184 if(ret&NOTIFY_STOP_MASK)
193 EXPORT_SYMBOL(notifier_call_chain);
196 * register_reboot_notifier - Register function to be called at reboot time
197 * @nb: Info about notifier function to be called
199 * Registers a function with the list of functions
200 * to be called at reboot time.
202 * Currently always returns zero, as notifier_chain_register
203 * always returns zero.
206 int register_reboot_notifier(struct notifier_block * nb)
208 return notifier_chain_register(&reboot_notifier_list, nb);
211 EXPORT_SYMBOL(register_reboot_notifier);
214 * unregister_reboot_notifier - Unregister previously registered reboot notifier
215 * @nb: Hook to be unregistered
217 * Unregisters a previously registered reboot
220 * Returns zero on success, or %-ENOENT on failure.
223 int unregister_reboot_notifier(struct notifier_block * nb)
225 return notifier_chain_unregister(&reboot_notifier_list, nb);
228 EXPORT_SYMBOL(unregister_reboot_notifier);
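/*
 * Illustrative sketch (not part of this file): a driver or module would
 * typically use the reboot notifier interface above roughly as follows.
 * The callback and structure names here are hypothetical.
 *
 *	static int mydrv_reboot_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		// event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF;
 *		// quiesce the hardware here, then let the chain continue.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mydrv_reboot_nb = {
 *		.notifier_call = mydrv_reboot_event,
 *		.priority = 0,
 *	};
 *
 *	// in module init / exit:
 *	//	register_reboot_notifier(&mydrv_reboot_nb);
 *	//	unregister_reboot_notifier(&mydrv_reboot_nb);
 */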
230 #ifndef CONFIG_SECURITY
233 if (vx_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
235 if (cap_raised(current->cap_effective, cap)) {
236 current->flags |= PF_SUPERPRIV;
241 EXPORT_SYMBOL(capable);
244 static int set_one_prio(struct task_struct *p, int niceval, int error)
248 if (p->uid != current->euid &&
249 p->euid != current->euid && !capable(CAP_SYS_NICE)) {
253 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
254 if (vx_flags(VXF_IGNEG_NICE, 0))
260 no_nice = security_task_setnice(p, niceval);
267 set_user_nice(p, niceval);
272 asmlinkage long sys_setpriority(int which, int who, int niceval)
274 struct task_struct *g, *p;
275 struct user_struct *user;
278 if (which > 2 || which < 0)
281 /* normalize: avoid signed division (rounding problems) */
288 read_lock(&tasklist_lock);
293 p = find_task_by_pid(who);
295 error = set_one_prio(p, niceval, error);
299 who = process_group(current);
300 do_each_task_pid(who, PIDTYPE_PGID, p) {
301 error = set_one_prio(p, niceval, error);
302 } while_each_task_pid(who, PIDTYPE_PGID, p);
305 user = current->user;
309 if ((who != current->uid) &&
310 !(user = find_user(vx_current_xid(), who)))
311 goto out_unlock; /* No processes for this user */
315 error = set_one_prio(p, niceval, error);
316 while_each_thread(g, p);
317 if (who != current->uid)
318 free_uid(user); /* For find_user() */
322 read_unlock(&tasklist_lock);
328 * Ugh. To avoid negative return values, "getpriority()" will
329 * not return the normal nice-value, but a negated value that
330 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
331 * to stay compatible.
333 asmlinkage long sys_getpriority(int which, int who)
335 struct task_struct *g, *p;
336 struct user_struct *user;
337 long niceval, retval = -ESRCH;
339 if (which > 2 || which < 0)
342 read_lock(&tasklist_lock);
347 p = find_task_by_pid(who);
349 niceval = 20 - task_nice(p);
350 if (niceval > retval)
356 who = process_group(current);
357 do_each_task_pid(who, PIDTYPE_PGID, p) {
358 niceval = 20 - task_nice(p);
359 if (niceval > retval)
361 } while_each_task_pid(who, PIDTYPE_PGID, p);
364 user = current->user;
368 if ((who != current->uid) &&
369 !(user = find_user(vx_current_xid(), who)))
370 goto out_unlock; /* No processes for this user */
374 niceval = 20 - task_nice(p);
375 if (niceval > retval)
378 while_each_thread(g, p);
379 if (who != current->uid)
380 free_uid(user); /* for find_user() */
384 read_unlock(&tasklist_lock);
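/*
 * Illustrative sketch (userspace, not part of this file): the raw
 * getpriority syscall returns 20 - nice (i.e. 40..1), and the C library
 * normally undoes the offset.  A direct caller would convert it back:
 *
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *		printf("nice value: %ld\n", 20 - raw);	// e.g. 0 by default
 *		return 0;
 *	}
 */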
390 * emergency_restart - reboot the system
392 * Without shutting down any hardware or taking any locks
393 * reboot the system. This is called when we know we are in
394 * trouble so this is our best effort to reboot. This is
395 * safe to call in interrupt context.
397 void emergency_restart(void)
399 machine_emergency_restart();
401 EXPORT_SYMBOL_GPL(emergency_restart);
403 void kernel_restart_prepare(char *cmd)
405 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
406 system_state = SYSTEM_RESTART;
411 * kernel_restart - reboot the system
412 * @cmd: pointer to buffer containing command to execute for restart
415 * Shutdown everything and perform a clean reboot.
416 * This is not safe to call in interrupt context.
418 void kernel_restart(char *cmd)
420 kernel_restart_prepare(cmd);
422 printk(KERN_EMERG "Restarting system.\n");
424 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
427 machine_restart(cmd);
429 EXPORT_SYMBOL_GPL(kernel_restart);
432 * kernel_kexec - reboot the system
434 * Move into place and start executing a preloaded standalone
435 * executable. If nothing was preloaded return an error.
437 void kernel_kexec(void)
440 struct kimage *image;
441 image = xchg(&kexec_image, NULL);
445 kernel_restart_prepare(NULL);
446 printk(KERN_EMERG "Starting new kernel\n");
448 machine_kexec(image);
451 EXPORT_SYMBOL_GPL(kernel_kexec);
453 void kernel_shutdown_prepare(enum system_states state)
455 notifier_call_chain(&reboot_notifier_list,
456 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
457 system_state = state;
461 * kernel_halt - halt the system
463 * Shutdown everything and perform a clean system halt.
465 void kernel_halt(void)
467 kernel_shutdown_prepare(SYSTEM_HALT);
468 printk(KERN_EMERG "System halted.\n");
472 EXPORT_SYMBOL_GPL(kernel_halt);
475 * kernel_power_off - power_off the system
477 * Shutdown everything and perform a clean system power_off.
479 void kernel_power_off(void)
481 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
482 printk(KERN_EMERG "Power down.\n");
485 EXPORT_SYMBOL_GPL(kernel_power_off);
487 long vs_reboot(unsigned int, void __user *);
490 * Reboot system call: for obvious reasons only root may call it,
491 * and even root needs to set up some magic numbers in the registers
492 * so that some mistake won't make this reboot the whole machine.
493 * You can also set the meaning of the ctrl-alt-del-key here.
495 * reboot doesn't sync: do that yourself before calling this.
497 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
501 /* We only trust the superuser with rebooting the system. */
502 if (!capable(CAP_SYS_BOOT))
505 /* For safety, we require "magic" arguments. */
506 if (magic1 != LINUX_REBOOT_MAGIC1 ||
507 (magic2 != LINUX_REBOOT_MAGIC2 &&
508 magic2 != LINUX_REBOOT_MAGIC2A &&
509 magic2 != LINUX_REBOOT_MAGIC2B &&
510 magic2 != LINUX_REBOOT_MAGIC2C))
513 /* Instead of trying to make the power_off code look like
514 * halt when pm_power_off is not set, do it the easy way.
516 if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
517 cmd = LINUX_REBOOT_CMD_HALT;
519 if (!vx_check(0, VX_ADMIN|VX_WATCH))
520 return vs_reboot(cmd, arg);
524 case LINUX_REBOOT_CMD_RESTART:
525 kernel_restart(NULL);
528 case LINUX_REBOOT_CMD_CAD_ON:
532 case LINUX_REBOOT_CMD_CAD_OFF:
536 case LINUX_REBOOT_CMD_HALT:
542 case LINUX_REBOOT_CMD_POWER_OFF:
548 case LINUX_REBOOT_CMD_RESTART2:
549 if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
553 buffer[sizeof(buffer) - 1] = '\0';
555 kernel_restart(buffer);
558 case LINUX_REBOOT_CMD_KEXEC:
563 #ifdef CONFIG_SOFTWARE_SUSPEND
564 case LINUX_REBOOT_CMD_SW_SUSPEND:
566 int ret = software_suspend();
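/*
 * Illustrative sketch (userspace, not part of this file): invoking the
 * reboot syscall requires CAP_SYS_BOOT plus both magic numbers, and the
 * caller is responsible for syncing first:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();		// sys_reboot() does not sync for you
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_POWER_OFF, NULL);
 */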
580 static void deferred_cad(void *dummy)
582 kernel_restart(NULL);
586 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
587 * As it's called within an interrupt, it may NOT sync: the only choice
588 * is whether to reboot at once, or just ignore the ctrl-alt-del.
590 void ctrl_alt_del(void)
592 static DECLARE_WORK(cad_work, deferred_cad, NULL);
595 schedule_work(&cad_work);
597 kill_proc(cad_pid, SIGINT, 1);
602 * Unprivileged users may change the real gid to the effective gid
603 * or vice versa. (BSD-style)
605 * If you set the real gid at all, or set the effective gid to a value not
606 * equal to the real gid, then the saved gid is set to the new effective gid.
608 * This makes it possible for a setgid program to completely drop its
609 * privileges, which is often a useful assertion to make when you are doing
610 * a security audit of a program.
612 * The general idea is that a program which uses just setregid() will be
613 * 100% compatible with BSD. A program which uses just setgid() will be
614 * 100% compatible with POSIX with saved IDs.
616 * SMP: There are no races, the GIDs are checked only by filesystem
617 * operations (as far as semantic preservation is concerned).
619 asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
621 int old_rgid = current->gid;
622 int old_egid = current->egid;
623 int new_rgid = old_rgid;
624 int new_egid = old_egid;
627 retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
631 if (rgid != (gid_t) -1) {
632 if ((old_rgid == rgid) ||
633 (current->egid==rgid) ||
639 if (egid != (gid_t) -1) {
640 if ((old_rgid == egid) ||
641 (current->egid == egid) ||
642 (current->sgid == egid) ||
649 if (new_egid != old_egid)
651 current->mm->dumpable = suid_dumpable;
654 if (rgid != (gid_t) -1 ||
655 (egid != (gid_t) -1 && egid != old_rgid))
656 current->sgid = new_egid;
657 current->fsgid = new_egid;
658 current->egid = new_egid;
659 current->gid = new_rgid;
660 key_fsgid_changed(current);
661 proc_id_connector(current, PROC_EVENT_GID);
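/*
 * Illustrative sketch (userspace, not part of this file): as the comment
 * above sys_setregid() describes, a set-gid program can drop its extra
 * group permanently by setting both the real and effective gid, which
 * also overwrites the saved gid:
 *
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *	#include <stdio.h>
 *
 *	gid_t rgid = getgid();		// the invoking user's real gid
 *	if (setregid(rgid, rgid) < 0)	// real, effective (and saved) -> rgid
 *		perror("setregid");	// privileges were not dropped
 */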
666 * setgid() is implemented like SysV w/ SAVED_IDS
668 * SMP: Same implicit races as above.
670 asmlinkage long sys_setgid(gid_t gid)
672 int old_egid = current->egid;
675 retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
679 if (capable(CAP_SETGID))
683 current->mm->dumpable = suid_dumpable;
686 current->gid = current->egid = current->sgid = current->fsgid = gid;
688 else if ((gid == current->gid) || (gid == current->sgid))
692 current->mm->dumpable = suid_dumpable;
695 current->egid = current->fsgid = gid;
700 key_fsgid_changed(current);
701 proc_id_connector(current, PROC_EVENT_GID);
705 static int set_user(uid_t new_ruid, int dumpclear)
707 struct user_struct *new_user;
709 new_user = alloc_uid(vx_current_xid(), new_ruid);
713 if (atomic_read(&new_user->processes) >=
714 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
715 new_user != &root_user) {
720 switch_uid(new_user);
724 current->mm->dumpable = suid_dumpable;
727 current->uid = new_ruid;
732 * Unprivileged users may change the real uid to the effective uid
733 * or vice versa. (BSD-style)
735 * If you set the real uid at all, or set the effective uid to a value not
736 * equal to the real uid, then the saved uid is set to the new effective uid.
738 * This makes it possible for a setuid program to completely drop its
739 * privileges, which is often a useful assertion to make when you are doing
740 * a security audit of a program.
742 * The general idea is that a program which uses just setreuid() will be
743 * 100% compatible with BSD. A program which uses just setuid() will be
744 * 100% compatible with POSIX with saved IDs.
746 asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
748 int old_ruid, old_euid, old_suid, new_ruid, new_euid;
751 retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
755 new_ruid = old_ruid = current->uid;
756 new_euid = old_euid = current->euid;
757 old_suid = current->suid;
759 if (ruid != (uid_t) -1) {
761 if ((old_ruid != ruid) &&
762 (current->euid != ruid) &&
763 !capable(CAP_SETUID))
767 if (euid != (uid_t) -1) {
769 if ((old_ruid != euid) &&
770 (current->euid != euid) &&
771 (current->suid != euid) &&
772 !capable(CAP_SETUID))
776 if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
779 if (new_euid != old_euid)
781 current->mm->dumpable = suid_dumpable;
784 current->fsuid = current->euid = new_euid;
785 if (ruid != (uid_t) -1 ||
786 (euid != (uid_t) -1 && euid != old_ruid))
787 current->suid = current->euid;
788 current->fsuid = current->euid;
790 key_fsuid_changed(current);
791 proc_id_connector(current, PROC_EVENT_UID);
793 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
799 * setuid() is implemented like SysV with SAVED_IDS
801 * Note that SAVED_ID's is deficient in that a setuid root program
802 * like sendmail, for example, cannot set its uid to be a normal
803 * user and then switch back, because if you're root, setuid() sets
804 * the saved uid too. If you don't like this, blame the bright people
805 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
806 * will allow a root program to temporarily drop privileges and be able to
807 * regain them by swapping the real and effective uid.
809 asmlinkage long sys_setuid(uid_t uid)
811 int old_euid = current->euid;
812 int old_ruid, old_suid, new_ruid, new_suid;
815 retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
819 old_ruid = new_ruid = current->uid;
820 old_suid = current->suid;
823 if (capable(CAP_SETUID)) {
824 if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
827 } else if ((uid != current->uid) && (uid != new_suid))
832 current->mm->dumpable = suid_dumpable;
835 current->fsuid = current->euid = uid;
836 current->suid = new_suid;
838 key_fsuid_changed(current);
839 proc_id_connector(current, PROC_EVENT_UID);
841 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
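/*
 * Illustrative sketch (userspace, not part of this file): the comment
 * above contrasts setuid() with the BSD-style setreuid().  A root daemon
 * that only wants to drop privileges temporarily can lower its effective
 * uid while keeping the real uid, and later restore it:
 *
 *	#include <unistd.h>
 *
 *	uid_t unpriv = 65534;		// hypothetical unprivileged uid
 *	setreuid(-1, unpriv);		// euid -> unpriv, real uid stays 0
 *	// ... do unprivileged work ...
 *	setreuid(-1, 0);		// allowed: euid may return to the real uid
 *
 * A plain setuid(unpriv) by root would also overwrite the saved uid and
 * make the drop irreversible.
 */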
846 * This function implements a generic ability to update ruid, euid,
847 * and suid. This allows you to implement the 4.4 compatible seteuid().
849 asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
851 int old_ruid = current->uid;
852 int old_euid = current->euid;
853 int old_suid = current->suid;
856 retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
860 if (!capable(CAP_SETUID)) {
861 if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
862 (ruid != current->euid) && (ruid != current->suid))
864 if ((euid != (uid_t) -1) && (euid != current->uid) &&
865 (euid != current->euid) && (euid != current->suid))
867 if ((suid != (uid_t) -1) && (suid != current->uid) &&
868 (suid != current->euid) && (suid != current->suid))
871 if (ruid != (uid_t) -1) {
872 if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
875 if (euid != (uid_t) -1) {
876 if (euid != current->euid)
878 current->mm->dumpable = suid_dumpable;
881 current->euid = euid;
883 current->fsuid = current->euid;
884 if (suid != (uid_t) -1)
885 current->suid = suid;
887 key_fsuid_changed(current);
888 proc_id_connector(current, PROC_EVENT_UID);
890 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
893 asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
897 if (!(retval = put_user(current->uid, ruid)) &&
898 !(retval = put_user(current->euid, euid)))
899 retval = put_user(current->suid, suid);
905 * Same as above, but for rgid, egid, sgid.
907 asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
911 retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
915 if (!capable(CAP_SETGID)) {
916 if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
917 (rgid != current->egid) && (rgid != current->sgid))
919 if ((egid != (gid_t) -1) && (egid != current->gid) &&
920 (egid != current->egid) && (egid != current->sgid))
922 if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
923 (sgid != current->egid) && (sgid != current->sgid))
926 if (egid != (gid_t) -1) {
927 if (egid != current->egid)
929 current->mm->dumpable = suid_dumpable;
932 current->egid = egid;
934 current->fsgid = current->egid;
935 if (rgid != (gid_t) -1)
937 if (sgid != (gid_t) -1)
938 current->sgid = sgid;
940 key_fsgid_changed(current);
941 proc_id_connector(current, PROC_EVENT_GID);
945 asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
949 if (!(retval = put_user(current->gid, rgid)) &&
950 !(retval = put_user(current->egid, egid)))
951 retval = put_user(current->sgid, sgid);
958 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
959 * is used for "access()" and for the NFS daemon (letting nfsd stay at
960 * whatever uid it wants to). It normally shadows "euid", except when
961 * explicitly set by setfsuid() or for access..
963 asmlinkage long sys_setfsuid(uid_t uid)
967 old_fsuid = current->fsuid;
968 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
971 if (uid == current->uid || uid == current->euid ||
972 uid == current->suid || uid == current->fsuid ||
975 if (uid != old_fsuid)
977 current->mm->dumpable = suid_dumpable;
980 current->fsuid = uid;
983 key_fsuid_changed(current);
984 proc_id_connector(current, PROC_EVENT_UID);
986 security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
992 * Same as setfsuid() above, but sets the fsgid used for filesystem checks.
994 asmlinkage long sys_setfsgid(gid_t gid)
998 old_fsgid = current->fsgid;
999 if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
1002 if (gid == current->gid || gid == current->egid ||
1003 gid == current->sgid || gid == current->fsgid ||
1004 capable(CAP_SETGID))
1006 if (gid != old_fsgid)
1008 current->mm->dumpable = suid_dumpable;
1011 current->fsgid = gid;
1012 key_fsgid_changed(current);
1013 proc_id_connector(current, PROC_EVENT_GID);
1018 asmlinkage long sys_times(struct tms __user * tbuf)
1021 * In the SMP world we might just be unlucky and have one of
1022 * the times increment as we use it. Since the value is an
1023 * atomically safe type this is just fine. Conceptually it's
1024 * as if the syscall took an instant longer to occur.
1028 cputime_t utime, stime, cutime, cstime;
1031 if (thread_group_empty(current)) {
1033 * Single thread case without the use of any locks.
1035 * We may race with release_task if two threads are
1036 * executing. However, release_task first adds up the
1037 * counters (__exit_signal) before removing the task
1038 * from the process tasklist (__unhash_process).
1039 * __exit_signal also acquires and releases the
1040 * siglock which results in the proper memory ordering
1041 * so that the list modifications are always visible
1042 * after the counters have been updated.
1044 * If the counters have been updated by the second thread
1045 * but the thread has not yet been removed from the list
1046 * then the other branch will be executing which will
1047 * block on tasklist_lock until the exit handling of the
1048 * other task is finished.
1050 * This also implies that the sighand->siglock cannot
1051 * be held by another processor. So we can also
1052 * skip acquiring that lock.
1054 utime = cputime_add(current->signal->utime, current->utime);
1055 stime = cputime_add(current->signal->stime, current->stime);
1056 cutime = current->signal->cutime;
1057 cstime = current->signal->cstime;
1062 /* Process with multiple threads */
1063 struct task_struct *tsk = current;
1064 struct task_struct *t;
1066 read_lock(&tasklist_lock);
1067 utime = tsk->signal->utime;
1068 stime = tsk->signal->stime;
1071 utime = cputime_add(utime, t->utime);
1072 stime = cputime_add(stime, t->stime);
1077 * While we have tasklist_lock read-locked, no dying thread
1078 * can be updating current->signal->[us]time. Instead,
1079 * we got their counts included in the live thread loop.
1080 * However, another thread can come in right now and
1081 * do a wait call that updates current->signal->c[us]time.
1082 * To make sure we always see that pair updated atomically,
1083 * we take the siglock around fetching them.
1085 spin_lock_irq(&tsk->sighand->siglock);
1086 cutime = tsk->signal->cutime;
1087 cstime = tsk->signal->cstime;
1088 spin_unlock_irq(&tsk->sighand->siglock);
1089 read_unlock(&tasklist_lock);
1091 tmp.tms_utime = cputime_to_clock_t(utime);
1092 tmp.tms_stime = cputime_to_clock_t(stime);
1093 tmp.tms_cutime = cputime_to_clock_t(cutime);
1094 tmp.tms_cstime = cputime_to_clock_t(cstime);
1095 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1098 return (long) jiffies_64_to_clock_t(get_jiffies_64());
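/*
 * Illustrative sketch (userspace, not part of this file): the tms fields
 * and the return value above are in clock ticks, so callers scale them
 * by sysconf(_SC_CLK_TCK):
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	struct tms t;
 *	long hz = sysconf(_SC_CLK_TCK);
 *	clock_t now = times(&t);
 *	printf("user %.2fs sys %.2fs\n",
 *	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
 */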
1102 * This needs some heavy checking ...
1103 * I just haven't the stomach for it. I also don't fully
1104 * understand sessions/pgrp etc. Let somebody who does explain it.
1106 * OK, I think I have the protection semantics right.... this is really
1107 * only important on a multi-user system anyway, to make sure one user
1108 * can't send a signal to a process owned by another. -TYT, 12/12/91
1110 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
1114 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
1116 struct task_struct *p;
1117 struct task_struct *group_leader = current->group_leader;
1122 pid = vx_map_pid(group_leader->pid);
1128 rpgid = vx_rmap_pid(pgid);
1130 /* From this point forward we keep holding onto the tasklist lock
1131 * so that our parent does not change from under us. -DaveM
1133 write_lock_irq(&tasklist_lock);
1136 p = find_task_by_pid(pid);
1141 if (!thread_group_leader(p))
1144 if (p->real_parent == group_leader) {
1146 if (p->signal->session != group_leader->signal->session)
1153 if (p != group_leader)
1158 if (p->signal->leader)
1162 struct task_struct *p;
1164 do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
1165 if (p->signal->session == group_leader->signal->session)
1167 } while_each_task_pid(rpgid, PIDTYPE_PGID, p);
1172 err = security_task_setpgid(p, rpgid);
1176 if (process_group(p) != rpgid) {
1177 detach_pid(p, PIDTYPE_PGID);
1178 p->signal->pgrp = rpgid;
1179 attach_pid(p, PIDTYPE_PGID, rpgid);
1184 /* All paths lead to here, thus we are safe. -DaveM */
1185 write_unlock_irq(&tasklist_lock);
1189 asmlinkage long sys_getpgid(pid_t pid)
1192 return vx_rmap_pid(process_group(current));
1195 struct task_struct *p;
1197 read_lock(&tasklist_lock);
1198 p = find_task_by_pid(pid);
1202 retval = security_task_getpgid(p);
1204 retval = vx_rmap_pid(process_group(p));
1206 read_unlock(&tasklist_lock);
1211 #ifdef __ARCH_WANT_SYS_GETPGRP
1213 asmlinkage long sys_getpgrp(void)
1215 /* SMP - assuming writes are word atomic this is fine */
1216 return process_group(current);
1221 asmlinkage long sys_getsid(pid_t pid)
1224 return current->signal->session;
1227 struct task_struct *p;
1229 read_lock(&tasklist_lock);
1230 p = find_task_by_pid(pid);
1234 retval = security_task_getsid(p);
1236 retval = p->signal->session;
1238 read_unlock(&tasklist_lock);
1243 asmlinkage long sys_setsid(void)
1245 struct task_struct *group_leader = current->group_leader;
1250 write_lock_irq(&tasklist_lock);
1252 pid = find_pid(PIDTYPE_PGID, group_leader->pid);
1256 group_leader->signal->leader = 1;
1257 __set_special_pids(group_leader->pid, group_leader->pid);
1258 group_leader->signal->tty = NULL;
1259 group_leader->signal->tty_old_pgrp = 0;
1260 err = process_group(group_leader);
1262 write_unlock_irq(&tasklist_lock);
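/*
 * Illustrative sketch (userspace, not part of this file): setsid() is
 * refused for a process-group leader, so a daemon typically forks first
 * and lets the child create the new session:
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	pid_t pid = fork();
 *	if (pid > 0)
 *		exit(0);		// parent exits
 *	if (pid == 0 && setsid() < 0)	// child becomes session leader and
 *		exit(1);		// loses its controlling tty
 */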
1268 * Supplementary group IDs
1271 /* init to 2 - one for init_task, one to ensure it is never freed */
1272 struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
1274 struct group_info *groups_alloc(int gidsetsize)
1276 struct group_info *group_info;
1280 nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
1281 /* Make sure we always allocate at least one indirect block pointer */
1282 nblocks = nblocks ? : 1;
1283 group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
1286 group_info->ngroups = gidsetsize;
1287 group_info->nblocks = nblocks;
1288 atomic_set(&group_info->usage, 1);
1290 if (gidsetsize <= NGROUPS_SMALL) {
1291 group_info->blocks[0] = group_info->small_block;
1293 for (i = 0; i < nblocks; i++) {
1295 b = (void *)__get_free_page(GFP_USER);
1297 goto out_undo_partial_alloc;
1298 group_info->blocks[i] = b;
1303 out_undo_partial_alloc:
1305 free_page((unsigned long)group_info->blocks[i]);
1311 EXPORT_SYMBOL(groups_alloc);
1313 void groups_free(struct group_info *group_info)
1315 if (group_info->blocks[0] != group_info->small_block) {
1317 for (i = 0; i < group_info->nblocks; i++)
1318 free_page((unsigned long)group_info->blocks[i]);
1323 EXPORT_SYMBOL(groups_free);
1325 /* export the group_info to a user-space array */
1326 static int groups_to_user(gid_t __user *grouplist,
1327 struct group_info *group_info)
1330 int count = group_info->ngroups;
1332 for (i = 0; i < group_info->nblocks; i++) {
1333 int cp_count = min(NGROUPS_PER_BLOCK, count);
1334 int off = i * NGROUPS_PER_BLOCK;
1335 int len = cp_count * sizeof(*grouplist);
1337 if (copy_to_user(grouplist+off, group_info->blocks[i], len))
1345 /* fill a group_info from a user-space array - it must be allocated already */
1346 static int groups_from_user(struct group_info *group_info,
1347 gid_t __user *grouplist)
1350 int count = group_info->ngroups;
1352 for (i = 0; i < group_info->nblocks; i++) {
1353 int cp_count = min(NGROUPS_PER_BLOCK, count);
1354 int off = i * NGROUPS_PER_BLOCK;
1355 int len = cp_count * sizeof(*grouplist);
1357 if (copy_from_user(group_info->blocks[i], grouplist+off, len))
1365 /* a simple Shell sort */
1366 static void groups_sort(struct group_info *group_info)
1368 int base, max, stride;
1369 int gidsetsize = group_info->ngroups;
1371 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
1376 max = gidsetsize - stride;
1377 for (base = 0; base < max; base++) {
1379 int right = left + stride;
1380 gid_t tmp = GROUP_AT(group_info, right);
1382 while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
1383 GROUP_AT(group_info, right) =
1384 GROUP_AT(group_info, left);
1388 GROUP_AT(group_info, right) = tmp;
1394 /* a simple bsearch */
1395 int groups_search(struct group_info *group_info, gid_t grp)
1403 right = group_info->ngroups;
1404 while (left < right) {
1405 int mid = (left+right)/2;
1406 int cmp = grp - GROUP_AT(group_info, mid);
1417 /* validate and set current->group_info */
1418 int set_current_groups(struct group_info *group_info)
1421 struct group_info *old_info;
1423 retval = security_task_setgroups(group_info);
1427 groups_sort(group_info);
1428 get_group_info(group_info);
1431 old_info = current->group_info;
1432 current->group_info = group_info;
1433 task_unlock(current);
1435 put_group_info(old_info);
1440 EXPORT_SYMBOL(set_current_groups);
1442 asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
1447 * SMP: Nobody else can change our grouplist. Thus we are
1454 /* no need to grab task_lock here; it cannot change */
1455 get_group_info(current->group_info);
1456 i = current->group_info->ngroups;
1458 if (i > gidsetsize) {
1462 if (groups_to_user(grouplist, current->group_info)) {
1468 put_group_info(current->group_info);
1473 * SMP: Our groups are copy-on-write. We can set them safely
1474 * without another task interfering.
1477 asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
1479 struct group_info *group_info;
1482 if (!capable(CAP_SETGID))
1484 if ((unsigned)gidsetsize > NGROUPS_MAX)
1487 group_info = groups_alloc(gidsetsize);
1490 retval = groups_from_user(group_info, grouplist);
1492 put_group_info(group_info);
1496 retval = set_current_groups(group_info);
1497 put_group_info(group_info);
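/*
 * Illustrative sketch (userspace, not part of this file): a process with
 * CAP_SETGID replaces its supplementary groups with setgroups(); any
 * process can read them back with getgroups().  The gids shown are
 * hypothetical.
 *
 *	#include <grp.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	gid_t groups[] = { 4, 20, 27 };
 *	if (setgroups(3, groups) < 0)
 *		perror("setgroups");	// needs CAP_SETGID
 *	int n = getgroups(0, NULL);	// number of supplementary groups set
 */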
1503 * Check whether we're fsgid/egid or in the supplemental group..
1505 int in_group_p(gid_t grp)
1508 if (grp != current->fsgid) {
1509 get_group_info(current->group_info);
1510 retval = groups_search(current->group_info, grp);
1511 put_group_info(current->group_info);
1516 EXPORT_SYMBOL(in_group_p);
1518 int in_egroup_p(gid_t grp)
1521 if (grp != current->egid) {
1522 get_group_info(current->group_info);
1523 retval = groups_search(current->group_info, grp);
1524 put_group_info(current->group_info);
1529 EXPORT_SYMBOL(in_egroup_p);
1531 DECLARE_RWSEM(uts_sem);
1533 EXPORT_SYMBOL(uts_sem);
1535 asmlinkage long sys_newuname(struct new_utsname __user * name)
1539 down_read(&uts_sem);
1540 if (copy_to_user(name, vx_new_utsname(), sizeof *name))
1546 asmlinkage long sys_sethostname(char __user *name, int len)
1549 char tmp[__NEW_UTS_LEN];
1551 if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
1553 if (len < 0 || len > __NEW_UTS_LEN)
1555 down_write(&uts_sem);
1557 if (!copy_from_user(tmp, name, len)) {
1558 char *ptr = vx_new_uts(nodename);
1560 memcpy(ptr, tmp, len);
1568 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1570 asmlinkage long sys_gethostname(char __user *name, int len)
1577 down_read(&uts_sem);
1578 ptr = vx_new_uts(nodename);
1579 i = 1 + strlen(ptr);
1583 if (copy_to_user(name, ptr, i))
1592 * Only setdomainname; getdomainname can be implemented by calling
1595 asmlinkage long sys_setdomainname(char __user *name, int len)
1598 char tmp[__NEW_UTS_LEN];
1600 if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
1602 if (len < 0 || len > __NEW_UTS_LEN)
1605 down_write(&uts_sem);
1607 if (!copy_from_user(tmp, name, len)) {
1608 char *ptr = vx_new_uts(domainname);
1610 memcpy(ptr, tmp, len);
1618 asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1620 if (resource >= RLIM_NLIMITS)
1623 struct rlimit value;
1624 task_lock(current->group_leader);
1625 value = current->signal->rlim[resource];
1626 task_unlock(current->group_leader);
1627 return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1631 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1634 * Back compatibility for getrlimit. Needed for some apps.
1637 asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1640 if (resource >= RLIM_NLIMITS)
1643 task_lock(current->group_leader);
1644 x = current->signal->rlim[resource];
1645 task_unlock(current->group_leader);
1646 if(x.rlim_cur > 0x7FFFFFFF)
1647 x.rlim_cur = 0x7FFFFFFF;
1648 if(x.rlim_max > 0x7FFFFFFF)
1649 x.rlim_max = 0x7FFFFFFF;
1650 return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1655 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1657 struct rlimit new_rlim, *old_rlim;
1660 if (resource >= RLIM_NLIMITS)
1662 if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1664 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1666 old_rlim = current->signal->rlim + resource;
1667 if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1668 !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
1670 if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
1673 retval = security_task_setrlimit(resource, &new_rlim);
1677 task_lock(current->group_leader);
1678 *old_rlim = new_rlim;
1679 task_unlock(current->group_leader);
1681 if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
1682 (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
1683 new_rlim.rlim_cur <= cputime_to_secs(
1684 current->signal->it_prof_expires))) {
1685 unsigned long rlim_cur = new_rlim.rlim_cur;
1688 if (rlim_cur == 0) {
1690 * The caller is asking for an immediate RLIMIT_CPU
1691 * expiry. But we use the zero value to mean "it was
1692 * never set". So let's cheat and make it one second
1697 cputime = secs_to_cputime(rlim_cur);
1698 read_lock(&tasklist_lock);
1699 spin_lock_irq(&current->sighand->siglock);
1700 set_process_cpu_timer(current, CPUCLOCK_PROF,
1702 spin_unlock_irq(&current->sighand->siglock);
1703 read_unlock(&tasklist_lock);
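/*
 * Illustrative sketch (userspace, not part of this file): an unprivileged
 * process may raise a soft limit up to its hard limit; raising the hard
 * limit itself requires CAP_SYS_RESOURCE, as checked above:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_NOFILE, &rl);
 *	rl.rlim_cur = rl.rlim_max;	// soft -> hard is always allowed
 *	setrlimit(RLIMIT_NOFILE, &rl);
 */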
1710 * It would make sense to put struct rusage in the task_struct,
1711 * except that would make the task_struct be *really big*. After
1712 * task_struct gets moved into malloc'ed memory, it would
1713 * make sense to do this. It will make moving the rest of the information
1714 * a lot simpler! (Which we're not doing right now because we're not
1715 * measuring them yet).
1717 * This expects to be called with tasklist_lock read-locked or better,
1718 * and the siglock not locked. It may momentarily take the siglock.
1720 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1721 * races with threads incrementing their own counters. But since word
1722 * reads are atomic, we either get new values or old values and we don't
1723 * care which for the sums. We always take the siglock to protect reading
1724 * the c* fields from p->signal from races with exit.c updating those
1725 * fields when reaping, so a sample either gets all the additions of a
1726 * given child after it's reaped, or none so this sample is before reaping.
1729 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1731 struct task_struct *t;
1732 unsigned long flags;
1733 cputime_t utime, stime;
1735 memset((char *) r, 0, sizeof *r);
1737 if (unlikely(!p->signal))
1740 utime = stime = cputime_zero;
1744 case RUSAGE_CHILDREN:
1745 spin_lock_irqsave(&p->sighand->siglock, flags);
1746 utime = p->signal->cutime;
1747 stime = p->signal->cstime;
1748 r->ru_nvcsw = p->signal->cnvcsw;
1749 r->ru_nivcsw = p->signal->cnivcsw;
1750 r->ru_minflt = p->signal->cmin_flt;
1751 r->ru_majflt = p->signal->cmaj_flt;
1752 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1754 if (who == RUSAGE_CHILDREN)
1758 utime = cputime_add(utime, p->signal->utime);
1759 stime = cputime_add(stime, p->signal->stime);
1760 r->ru_nvcsw += p->signal->nvcsw;
1761 r->ru_nivcsw += p->signal->nivcsw;
1762 r->ru_minflt += p->signal->min_flt;
1763 r->ru_majflt += p->signal->maj_flt;
1766 utime = cputime_add(utime, t->utime);
1767 stime = cputime_add(stime, t->stime);
1768 r->ru_nvcsw += t->nvcsw;
1769 r->ru_nivcsw += t->nivcsw;
1770 r->ru_minflt += t->min_flt;
1771 r->ru_majflt += t->maj_flt;
1780 cputime_to_timeval(utime, &r->ru_utime);
1781 cputime_to_timeval(stime, &r->ru_stime);
1784 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1787 read_lock(&tasklist_lock);
1788 k_getrusage(p, who, &r);
1789 read_unlock(&tasklist_lock);
1790 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1793 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
1795 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1797 return getrusage(current, who, ru);
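/*
 * Illustrative sketch (userspace, not part of this file): typical use of
 * the syscall above:
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("major faults: %ld, voluntary ctx switches: %ld\n",
 *		       ru.ru_majflt, ru.ru_nvcsw);
 */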
1800 asmlinkage long sys_umask(int mask)
1802 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1806 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1807 unsigned long arg4, unsigned long arg5)
1811 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1816 case PR_SET_PDEATHSIG:
1817 if (!valid_signal(arg2)) {
1821 current->pdeath_signal = arg2;
1823 case PR_GET_PDEATHSIG:
1824 error = put_user(current->pdeath_signal, (int __user *)arg2);
1826 case PR_GET_DUMPABLE:
1827 error = current->mm->dumpable;
1829 case PR_SET_DUMPABLE:
1830 if (arg2 < 0 || arg2 > 1) {
1834 current->mm->dumpable = arg2;
1837 case PR_SET_UNALIGN:
1838 error = SET_UNALIGN_CTL(current, arg2);
1840 case PR_GET_UNALIGN:
1841 error = GET_UNALIGN_CTL(current, arg2);
1844 error = SET_FPEMU_CTL(current, arg2);
1847 error = GET_FPEMU_CTL(current, arg2);
1850 error = SET_FPEXC_CTL(current, arg2);
1853 error = GET_FPEXC_CTL(current, arg2);
1856 error = PR_TIMING_STATISTICAL;
1859 if (arg2 == PR_TIMING_STATISTICAL)
1865 case PR_GET_KEEPCAPS:
1866 if (current->keep_capabilities)
1869 case PR_SET_KEEPCAPS:
1870 if (arg2 != 0 && arg2 != 1) {
1874 current->keep_capabilities = arg2;
1877 struct task_struct *me = current;
1878 unsigned char ncomm[sizeof(me->comm)];
1880 ncomm[sizeof(me->comm)-1] = 0;
1881 if (strncpy_from_user(ncomm, (char __user *)arg2,
1882 sizeof(me->comm)-1) < 0)
1884 set_task_comm(me, ncomm);
1888 struct task_struct *me = current;
1889 unsigned char tcomm[sizeof(me->comm)];
1891 get_task_comm(tcomm, me);
1892 if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))