/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/vs_pid.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}
static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
	}
	return ret;
}
/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
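/*
 * Usage sketch (illustrative, not part of this file): a minimal client
 * of the atomic chain API above.  The chain, callback and init hook are
 * hypothetical names; only the notifier_block layout and the
 * register/call/unregister entry points come from this file.  Because
 * notifier_chain_register() orders entries by descending priority,
 * higher .priority callbacks run first.
 */
#if 0
static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_notify(struct notifier_block *nb,
		unsigned long val, void *data)
{
	/* runs under rcu_read_lock() via atomic_notifier_call_chain:
	 * must not block */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_notify,
	.priority	= 10,		/* called before entries below 10 */
};

static int __init example_init(void)
{
	atomic_notifier_chain_register(&example_chain, &example_nb);
	atomic_notifier_call_chain(&example_chain, 0, NULL);
	return atomic_notifier_chain_unregister(&example_chain, &example_nb);
}
#endif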
/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v);
		up_read(&nh->rwsem);
	}
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
 * SRCU notifier chain routines.  Registration and unregistration
 * use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an SRCU notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
/**
 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an SRCU notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**
 * srcu_init_notifier_head - Initialize an SRCU notifier head
 * @nh: Pointer to head of the srcu notifier chain
 *
 * Unlike other sorts of notifier heads, SRCU notifier heads require
 * dynamic initialization.  Be sure to call this routine before
 * calling any of the other SRCU notifier routines for this head.
 *
 * If an SRCU notifier head is deallocated, it must first be cleaned
 * up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 * per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
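/*
 * Lifecycle sketch (illustrative): unlike the static ATOMIC_ and
 * BLOCKING_NOTIFIER_HEAD initializers, an SRCU head must be set up and
 * torn down at run time, exactly as the comment above warns.  The
 * variable name is hypothetical.
 */
#if 0
	struct srcu_notifier_head example_head;

	srcu_init_notifier_head(&example_head);
	/* ... srcu_notifier_chain_register(), srcu_notifier_call_chain() ... */
	srcu_cleanup_notifier_head(&example_head);	/* before freeing it */
#endif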
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
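/*
 * Usage sketch (illustrative): a driver that must quiesce hardware on
 * shutdown hooks the reboot chain through the wrappers above.  The
 * callback runs in process context and receives SYS_RESTART, SYS_HALT
 * or SYS_POWER_OFF as the event value; the names below are hypothetical.
 */
#if 0
static int example_reboot_notify(struct notifier_block *nb,
		unsigned long event, void *cmd)
{
	/* stop DMA, flush write caches, etc. */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call	= example_reboot_notify,
};

	/* from init code: */
	register_reboot_notifier(&example_reboot_nb);
	/* from exit code: */
	unregister_reboot_notifier(&example_reboot_nb);
#endif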
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		if (vx_flags(VXF_IGNEG_NICE, 0))
			error = 0;
		else
			error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
				continue;
			error = set_one_prio(p, niceval, error);
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) &&
			 !(user = find_user(vx_current_xid(), who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (!who)
			who = current->pid;
		p = find_task_by_pid(who);
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (!who)
			who = process_group(current);
		do_each_task_pid(who, PIDTYPE_PGID, p) {
			if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
				continue;
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_task_pid(who, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) &&
			 !(user = find_user(vx_current_xid(), who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
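/*
 * Userspace view (illustrative): a caller that bypasses the C library
 * and uses the raw syscall sees the offset encoding described above and
 * must undo it; the glibc wrapper performs this conversion itself.
 * Needs <sys/syscall.h> and <sys/resource.h>.
 */
#if 0
	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
	if (ret > 0)
		nice_value = 20 - ret;	/* back to the -20..19 range */
#endif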
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}
/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
long vs_reboot(unsigned int, void __user *);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	if (!vx_check(0, VS_ADMIN|VS_WATCH))
		return vs_reboot(cmd, arg);

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
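/*
 * Userspace view (illustrative): the magic-number check above is why a
 * stray syscall cannot reboot the machine by accident.  A privileged
 * caller supplies both constants explicitly (glibc's reboot() wrapper
 * fills them in).  Needs <unistd.h> and <linux/reboot.h>.
 */
#if 0
	sync();		/* sys_reboot does not sync, per the comment above */
	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		LINUX_REBOOT_CMD_RESTART, NULL);
#endif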
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are not races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
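/*
 * Userspace view (illustrative): per the comment block above, a setgid
 * program can permanently drop its extra group privilege by forcing the
 * real and effective gid to the real gid; the saved gid follows the new
 * effective gid.
 */
#if 0
	gid_t rgid = getgid();

	if (setregid(rgid, rgid) < 0)	/* also resets the saved gid */
		perror("setregid");
#endif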
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(vx_current_xid(), new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
			current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
1271 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
1272 * is used for "access()" and for the NFS daemon (letting nfsd stay at
1273 * whatever uid it wants to). It normally shadows "euid", except when
1274 * explicitly set by setfsuid() or for access..
1276 asmlinkage long sys_setfsuid(uid_t uid)
1280 old_fsuid = current->fsuid;
1281 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
1284 if (uid == current->uid || uid == current->euid ||
1285 uid == current->suid || uid == current->fsuid ||
1286 capable(CAP_SETUID)) {
1287 if (uid != old_fsuid) {
1288 current->mm->dumpable = suid_dumpable;
1291 current->fsuid = uid;
1294 key_fsuid_changed(current);
1295 proc_id_connector(current, PROC_EVENT_UID);
1297 security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
/*
 * "Samma på svenska.." - the same thing, in Swedish: setfsgid() mirrors
 * setfsuid() above, but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user *tbuf)
{
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	pid_t rpgid;
	int err = -EINVAL;

	if (!pid)
		pid = vx_map_pid(group_leader->pid);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	rpgid = vx_rmap_pid(pgid);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->parent == group_leader) {
		err = -EPERM;
		if (process_session(p) != process_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, rpgid);

		if (!g || process_session(g) != process_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, rpgid);
	if (err)
		goto out;

	if (process_group(p) != rpgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = rpgid;
		attach_pid(p, PIDTYPE_PGID, rpgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
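/*
 * Userspace view (illustrative): job-control shells call setpgid() on
 * both sides of fork() so the child lands in its own process group no
 * matter which process runs first.
 */
#if 0
	pid_t child = fork();

	if (child == 0) {
		setpgid(0, 0);		/* child: own group, id = own pid */
		/* execve(...); */
	} else if (child > 0) {
		setpgid(child, child);	/* parent: same change, closes the race */
	}
#endif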
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return vx_rmap_pid(process_group(current));
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = vx_rmap_pid(process_group(p));
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
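/*
 * Userspace view (illustrative): because setsid() fails for a process
 * group leader (the first check above), daemons fork() first so the
 * child is guaranteed not to be a leader before calling it.
 */
#if 0
	if (fork() > 0)
		_exit(0);	/* parent exits; child is not a group leader */
	if (setsid() < 0)	/* child: new session, new group, no tty */
		perror("setsid");
#endif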
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
		struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
		gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 * SMP: Nobody else can change our grouplist. Thus we are
	 * safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		if (rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			rlim_cur = 1;
		}
		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		t = p;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			r->ru_nvcsw += t->nvcsw;
			r->ru_nivcsw += t->nivcsw;
			r->ru_minflt += t->min_flt;
			r->ru_majflt += t->maj_flt;
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
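/*
 * Userspace view (illustrative): umask(2) always succeeds and returns
 * the previous mask, so reading the current mask without disturbing it
 * takes two calls.
 */
#if 0
	mode_t old = umask(022);	/* install new mask, fetch old one */
	umask(old);			/* put the original back */
#endif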
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		current->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(current->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = current->mm->dumpable;
		break;
	case PR_SET_DUMPABLE:
		if (arg2 > 1) {
			error = -EINVAL;
			break;
		}
		current->mm->dumpable = arg2;
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(current, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(current, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(current, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(current, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(current, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 == PR_TIMING_STATISTICAL)
			error = 0;
		else
			error = -EINVAL;
		break;

	case PR_GET_KEEPCAPS:
		if (current->keep_capabilities)
			error = 1;
		break;
	case PR_SET_KEEPCAPS:
		if (arg2 != 0 && arg2 != 1) {
			error = -EINVAL;
			break;
		}
		current->keep_capabilities = arg2;
		break;
	case PR_SET_NAME: {
		struct task_struct *me = current;
		unsigned char ncomm[sizeof(me->comm)];

		ncomm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(ncomm, (char __user *)arg2,
					sizeof(me->comm)-1) < 0)
			return -EFAULT;
		set_task_comm(me, ncomm);
		return 0;
	}
	case PR_GET_NAME: {
		struct task_struct *me = current;
		unsigned char tcomm[sizeof(me->comm)];

		get_task_comm(tcomm, me);
		if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
			return -EFAULT;
		return 0;
	}
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(current, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(current, arg2);
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}
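/*
 * Userspace view (illustrative): callers that do not need the cache
 * optimization described above simply pass NULL for it.  Needs
 * <stdio.h> and <sys/syscall.h>; at the time this code was written
 * there was no glibc wrapper for getcpu.
 */
#if 0
	unsigned cpu, node;

	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
#endif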