/*
 * Removed CKRM code base from kernel tree.
 * [linux-2.6.git] / kernel / sys.c
 */
1 /*
2  *  linux/kernel/sys.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 #include <linux/config.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/smp_lock.h>
13 #include <linux/notifier.h>
14 #include <linux/kmod.h>
15 #include <linux/reboot.h>
16 #include <linux/prctl.h>
17 #include <linux/init.h>
18 #include <linux/highuid.h>
19 #include <linux/fs.h>
20 #include <linux/kernel.h>
21 #include <linux/kexec.h>
22 #include <linux/workqueue.h>
23 #include <linux/device.h>
24 #include <linux/key.h>
25 #include <linux/times.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/vs_cvirt.h>
31 #include <linux/compat.h>
32 #include <linux/syscalls.h>
33
34 #include <asm/uaccess.h>
35 #include <asm/io.h>
36 #include <asm/unistd.h>
37
38 #ifndef SET_UNALIGN_CTL
39 # define SET_UNALIGN_CTL(a,b)   (-EINVAL)
40 #endif
41 #ifndef GET_UNALIGN_CTL
42 # define GET_UNALIGN_CTL(a,b)   (-EINVAL)
43 #endif
44 #ifndef SET_FPEMU_CTL
45 # define SET_FPEMU_CTL(a,b)     (-EINVAL)
46 #endif
47 #ifndef GET_FPEMU_CTL
48 # define GET_FPEMU_CTL(a,b)     (-EINVAL)
49 #endif
50 #ifndef SET_FPEXC_CTL
51 # define SET_FPEXC_CTL(a,b)     (-EINVAL)
52 #endif
53 #ifndef GET_FPEXC_CTL
54 # define GET_FPEXC_CTL(a,b)     (-EINVAL)
55 #endif
56
57 /*
58  * this is where the system-wide overflow UID and GID are defined, for
59  * architectures that now have 32-bit UID/GID but didn't in the past
60  */
61
62 int overflowuid = DEFAULT_OVERFLOWUID;
63 int overflowgid = DEFAULT_OVERFLOWGID;
64
65 #ifdef CONFIG_UID16
66 EXPORT_SYMBOL(overflowuid);
67 EXPORT_SYMBOL(overflowgid);
68 #endif
69
70 /*
71  * the same as above, but for filesystems which can only store a 16-bit
72  * UID and GID. as such, this is needed on all architectures
73  */
74
75 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
76 int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
77
78 EXPORT_SYMBOL(fs_overflowuid);
79 EXPORT_SYMBOL(fs_overflowgid);
80
81 /*
82  * this indicates whether you can reboot with ctrl-alt-del: the default is yes
83  */
84
/* Ctrl-Alt-Del behaviour: 1 = reboot, 0 = send SIGINT to cad_pid instead. */
int C_A_D = 1;
/* PID signalled on ctrl-alt-del when C_A_D is 0 (defaults to init). */
int cad_pid = 1;

/*
 *      Notifier list for kernel code which wants to be called
 *      at shutdown. This is used to stop any idling DMA operations
 *      and the like. 
 */

static struct notifier_block *reboot_notifier_list;
/* Protects every notifier chain managed by the helpers below, not just
 * reboot_notifier_list. */
rwlock_t notifier_lock = RW_LOCK_UNLOCKED;
96
97 /**
98  *      notifier_chain_register - Add notifier to a notifier chain
99  *      @list: Pointer to root list pointer
100  *      @n: New entry in notifier chain
101  *
102  *      Adds a notifier to a notifier chain.
103  *
104  *      Currently always returns zero.
105  */
106  
107 int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
108 {
109         write_lock(&notifier_lock);
110         while(*list)
111         {
112                 if(n->priority > (*list)->priority)
113                         break;
114                 list= &((*list)->next);
115         }
116         n->next = *list;
117         *list=n;
118         write_unlock(&notifier_lock);
119         return 0;
120 }
121
122 EXPORT_SYMBOL(notifier_chain_register);
123
124 /**
125  *      notifier_chain_unregister - Remove notifier from a notifier chain
126  *      @nl: Pointer to root list pointer
127  *      @n: New entry in notifier chain
128  *
129  *      Removes a notifier from a notifier chain.
130  *
131  *      Returns zero on success, or %-ENOENT on failure.
132  */
133  
134 int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
135 {
136         write_lock(&notifier_lock);
137         while((*nl)!=NULL)
138         {
139                 if((*nl)==n)
140                 {
141                         *nl=n->next;
142                         write_unlock(&notifier_lock);
143                         return 0;
144                 }
145                 nl=&((*nl)->next);
146         }
147         write_unlock(&notifier_lock);
148         return -ENOENT;
149 }
150
151 EXPORT_SYMBOL(notifier_chain_unregister);
152
153 /**
154  *      notifier_call_chain - Call functions in a notifier chain
155  *      @n: Pointer to root pointer of notifier chain
156  *      @val: Value passed unmodified to notifier function
157  *      @v: Pointer passed unmodified to notifier function
158  *
159  *      Calls each function in a notifier chain in turn.
160  *
161  *      If the return value of the notifier can be and'd
162  *      with %NOTIFY_STOP_MASK, then notifier_call_chain
163  *      will return immediately, with the return value of
164  *      the notifier function which halted execution.
165  *      Otherwise, the return value is the return value
166  *      of the last notifier function called.
167  */
168  
169 int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
170 {
171         int ret=NOTIFY_DONE;
172         struct notifier_block *nb = *n;
173
174         while(nb)
175         {
176                 ret=nb->notifier_call(nb,val,v);
177                 if(ret&NOTIFY_STOP_MASK)
178                 {
179                         return ret;
180                 }
181                 nb=nb->next;
182         }
183         return ret;
184 }
185
186 EXPORT_SYMBOL(notifier_call_chain);
187
188 /**
189  *      register_reboot_notifier - Register function to be called at reboot time
190  *      @nb: Info about notifier function to be called
191  *
192  *      Registers a function with the list of functions
193  *      to be called at reboot time.
194  *
195  *      Currently always returns zero, as notifier_chain_register
196  *      always returns zero.
197  */
198  
int register_reboot_notifier(struct notifier_block * nb)
{
	/* Thin wrapper: add @nb to the shutdown chain; always returns 0. */
	return notifier_chain_register(&reboot_notifier_list, nb);
}
203
204 EXPORT_SYMBOL(register_reboot_notifier);
205
206 /**
207  *      unregister_reboot_notifier - Unregister previously registered reboot notifier
208  *      @nb: Hook to be unregistered
209  *
210  *      Unregisters a previously registered reboot
211  *      notifier function.
212  *
213  *      Returns zero on success, or %-ENOENT on failure.
214  */
215  
int unregister_reboot_notifier(struct notifier_block * nb)
{
	/* Thin wrapper: remove @nb; returns 0 or -ENOENT if not registered. */
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}
220
221 EXPORT_SYMBOL(unregister_reboot_notifier);
/*
 * Apply @niceval to one task after permission checks.
 *
 * @error threads the caller's accumulated status through per-task calls:
 * sys_setpriority seeds it with -ESRCH, and the first task we actually
 * renice clears it to 0, so "no matching process" and "matched but denied"
 * are distinguishable.  Returns the (possibly updated) error.
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	/* May only touch tasks owned by us unless CAP_SYS_NICE. */
	if (p->uid != current->euid &&
		p->uid != current->uid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	/* Raising priority (lower nice) needs CAP_SYS_NICE ... */
	if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
		/* ... unless this vserver context ignores negative nice
		 * requests (VXF_IGNEG_NICE): then silently succeed without
		 * changing anything — presumably vserver policy; see
		 * the Linux-VServer patch for the flag's semantics. */
		if (vx_flags(VXF_IGNEG_NICE, 0))
			error = 0;
		else
			error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	/* First successful target: convert the seed -ESRCH to success. */
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
249
/*
 * setpriority(2): set the nice value of a process (PRIO_PROCESS), a
 * process group (PRIO_PGRP) or all processes of a user (PRIO_USER).
 * @who == 0 means "current pid / pgrp / uid".  Returns 0 on success,
 * -EINVAL for a bad @which, -ESRCH when nothing matched, or the last
 * permission error from set_one_prio().
 */
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	/* tasklist_lock keeps the task/pgrp/user iterations stable. */
	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				/* find_user takes a reference, dropped below;
				 * lookup is scoped to this vserver context. */
				if ((who != current->uid) &&
					!(user = find_user(vx_current_xid(), who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
304
305 /*
306  * Ugh. To avoid negative return values, "getpriority()" will
307  * not return the normal nice-value, but a negated value that
308  * has been offset by 20 (ie it returns 40..1 instead of -20..19)
309  * to stay compatible.
310  */
311 asmlinkage long sys_getpriority(int which, int who)
312 {
313         struct task_struct *g, *p;
314         struct user_struct *user;
315         long niceval, retval = -ESRCH;
316
317         if (which > 2 || which < 0)
318                 return -EINVAL;
319
320         read_lock(&tasklist_lock);
321         switch (which) {
322                 case PRIO_PROCESS:
323                         if (!who)
324                                 who = current->pid;
325                         p = find_task_by_pid(who);
326                         if (p) {
327                                 niceval = 20 - task_nice(p);
328                                 if (niceval > retval)
329                                         retval = niceval;
330                         }
331                         break;
332                 case PRIO_PGRP:
333                         if (!who)
334                                 who = process_group(current);
335                         do_each_task_pid(who, PIDTYPE_PGID, p) {
336                                 niceval = 20 - task_nice(p);
337                                 if (niceval > retval)
338                                         retval = niceval;
339                         } while_each_task_pid(who, PIDTYPE_PGID, p);
340                         break;
341                 case PRIO_USER:
342                         user = current->user;
343                         if (!who)
344                                 who = current->uid;
345                         else
346                                 if ((who != current->uid) &&
347                                         !(user = find_user(vx_current_xid(), who)))
348                                         goto out_unlock;        /* No processes for this user */
349
350                         do_each_thread(g, p)
351                                 if (p->uid == who) {
352                                         niceval = 20 - task_nice(p);
353                                         if (niceval > retval)
354                                                 retval = niceval;
355                                 }
356                         while_each_thread(g, p);
357                         if (who != current->uid)
358                                 free_uid(user);         /* for find_user() */
359                         break;
360         }
361 out_unlock:
362         read_unlock(&tasklist_lock);
363
364         key_fsgid_changed(current);
365         return 0;
366 }
367
368 long vs_reboot(unsigned int, void *);
369
370 /*
371  * Reboot system call: for obvious reasons only root may call it,
372  * and even root needs to set up some magic numbers in the registers
373  * so that some mistake won't make this reboot the whole machine.
374  * You can also set the meaning of the ctrl-alt-del-key here.
375  *
376  * reboot doesn't sync: do that yourself before calling this.
377  */
/*
 * reboot(2).  Requires CAP_SYS_BOOT and the magic1/magic2 cookie pair.
 * Depending on @cmd: restarts, halts or powers off the machine, toggles
 * ctrl-alt-del behaviour, kexecs a loaded image, or triggers software
 * suspend.  Several branches (machine_restart/halt/power_off, do_exit,
 * machine_kexec) do not return.  Runs under the BKL; every early-return
 * path below lock_kernel() must unlock first.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
			magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
			magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Inside a vserver guest (not the admin/watch context) the request
	 * is redirected to the virtualized vs_reboot() instead of touching
	 * the real machine — vserver-patch behaviour. */
	if (!vx_check(0, VX_ADMIN|VX_WATCH))
		return vs_reboot(cmd, arg);

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		printk(KERN_EMERG "Restarting system.\n");
		machine_restart(NULL);	/* does not return */
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
		system_state = SYSTEM_HALT;
		device_shutdown();
		printk(KERN_EMERG "System halted.\n");
		machine_halt();
		unlock_kernel();
		do_exit(0);	/* machine_halt may return on some arches */
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
		system_state = SYSTEM_POWER_OFF;
		device_shutdown();
		printk(KERN_EMERG "Power down.\n");
		machine_power_off();
		unlock_kernel();
		do_exit(0);	/* as above */
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		/* @arg is a user-supplied command string for the firmware;
		 * copy with truncation and force NUL termination. */
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
		machine_restart(buffer);	/* does not return */
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
	{
		struct kimage *image;
		/* Atomically claim the loaded image so it is booted once. */
		image = xchg(&kexec_image, 0);
		if (!image) {
			unlock_kernel();
			return -EINVAL;
		}
		notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		/* The new kernel starts from a boot-like state. */
		system_state = SYSTEM_BOOTING;
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
		machine_kexec(image);	/* does not return */
		break;
	}
#endif

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
485
/* Workqueue callback: perform the ctrl-alt-del restart in process context,
 * so the reboot notifiers may sleep (they cannot in the keyboard IRQ). */
static void deferred_cad(void *dummy)
{
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
	machine_restart(NULL);
}
491
492 /*
493  * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
494  * As it's called within an interrupt, it may NOT sync: the only choice
495  * is whether to reboot at once, or just ignore the ctrl-alt-del.
496  */
/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	/* Static: one pending restart request is enough. */
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		/* C-A-D disabled: notify the registered process instead. */
		kill_proc(cad_pid, SIGINT, 1);
}
506         
507
508
509 /*
510  * Unprivileged users may change the real gid to the effective gid
511  * or vice versa.  (BSD-style)
512  *
513  * If you set the real gid at all, or set the effective gid to a value not
514  * equal to the real gid, then the saved gid is set to the new effective gid.
515  *
516  * This makes it possible for a setgid program to completely drop its
517  * privileges, which is often a useful assertion to make when you are doing
518  * a security audit over a program.
519  *
520  * The general idea is that a program which uses just setregid() will be
521  * 100% compatible with BSD.  A program which uses just setgid() will be
522  * 100% compatible with POSIX with saved IDs. 
523  *
524  * SMP: There are not races, the GIDs are checked only by filesystem
525  *      operations (as far as semantic preservation is concerned).
526  */
/*
 * setregid(2): BSD-style real/effective gid update; see the block comment
 * above for the permission and saved-gid rules.  Returns 0, -EPERM, or an
 * LSM error.
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	/* rgid may be set to the old real or effective gid, or anything
	 * with CAP_SETGID. */
	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	/* egid may be set to any of the current real/effective/saved gids,
	 * or anything with CAP_SETGID. */
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else {
			return -EPERM;
		}
	}
	/* Changing egid makes the process non-dumpable (suid_dumpable
	 * policy); the barrier orders the flag write before the id writes. */
	if (new_egid != old_egid)
	{
		current->mm->dumpable = suid_dumpable;
		wmb();
	}
	/* POSIX: setting rgid, or egid != old rgid, also sets the saved gid. */
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;

	/* Re-key the session keyring state to the new fsgid. */
	key_fsgid_changed(current);
	return 0;
}
572
573 /*
574  * setgid() is implemented like SysV w/ SAVED_IDS 
575  *
576  * SMP: Same implicit races as above.
577  */
/*
 * setgid(2), SysV SAVED_IDS semantics: privileged callers set all four
 * gids; unprivileged callers may only restore egid/fsgid to the real or
 * saved gid.  Returns 0, -EPERM, or an LSM error.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID))
	{
		/* Effective gid change => clear dumpable per suid_dumpable
		 * policy; barrier orders the flag before the id writes. */
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	}
	else if ((gid == current->gid) || (gid == current->sgid))
	{
		if(old_egid != gid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	/* Re-key the session keyring state to the new fsgid. */
	key_fsgid_changed(current);

	return 0;
}
612   
/*
 * Switch current's real uid to @new_ruid, moving it onto the matching
 * user_struct (allocated per-uid, per-vserver-context) and enforcing
 * RLIMIT_NPROC for the target user.  @dumpclear: nonzero if the caller
 * is also changing euid, which makes the process non-dumpable.
 * Returns 0 or -EAGAIN (allocation failure or process limit hit).
 */
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(vx_current_xid(), new_ruid);
	if (!new_user)
		return -EAGAIN;

	/* Refuse the switch if it would exceed the target user's process
	 * limit — root is exempt. */
	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	/* Transfers the alloc_uid reference to current and drops the old
	 * user_struct's reference. */
	switch_uid(new_user);

	if(dumpclear)
	{
		current->mm->dumpable = suid_dumpable;
		wmb();	/* order the dumpable clear before the uid write */
	}
	current->uid = new_ruid;
	return 0;
}
638
639 /*
640  * Unprivileged users may change the real uid to the effective uid
641  * or vice versa.  (BSD-style)
642  *
643  * If you set the real uid at all, or set the effective uid to a value not
644  * equal to the real uid, then the saved uid is set to the new effective uid.
645  *
646  * This makes it possible for a setuid program to completely drop its
647  * privileges, which is often a useful assertion to make when you are doing
648  * a security audit over a program.
649  *
650  * The general idea is that a program which uses just setreuid() will be
651  * 100% compatible with BSD.  A program which uses just setuid() will be
652  * 100% compatible with POSIX with saved IDs. 
653  */
/*
 * setreuid(2): BSD-style real/effective uid update; see the block comment
 * above for the rules.  Returns 0, -EPERM, -EAGAIN (from set_user), or an
 * LSM error.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	/* ruid may be set to the old real or effective uid, or anything
	 * with CAP_SETUID. */
	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	/* euid may be set to any of the current real/effective/saved uids,
	 * or anything with CAP_SETUID. */
	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	/* set_user also clears dumpable when the euid changes. */
	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid)
	{
		current->mm->dumpable = suid_dumpable;
		wmb();	/* order the dumpable clear before the id writes */
	}
	current->fsuid = current->euid = new_euid;
	/* POSIX: setting ruid, or euid != old ruid, also sets the saved uid. */
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	/* NOTE(review): redundant — fsuid was already set to euid above;
	 * matches mainline of this era, kept as-is. */
	current->fsuid = current->euid;

	/* Re-key the session keyring state to the new fsuid. */
	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
702
703
704                 
705 /*
706  * setuid() is implemented like SysV with SAVED_IDS 
707  * 
708  * Note that SAVED_ID's is deficient in that a setuid root program
709  * like sendmail, for example, cannot set its uid to be a normal 
710  * user and then switch back, because if you're root, setuid() sets
711  * the saved uid too.  If you don't like this, blame the bright people
712  * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
713  * will allow a root program to temporarily drop privileges and be able to
714  * regain them by swapping the real and effective uid.  
715  */
/*
 * setuid(2), SysV SAVED_IDS semantics (see the block comment above):
 * privileged callers set all uids including saved; unprivileged callers
 * may only set euid/fsuid to the real or saved uid.  Returns 0, -EPERM,
 * -EAGAIN (from set_user), or an LSM error.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;
	
	if (capable(CAP_SETUID)) {
		/* Privileged: change the real uid too (set_user clears
		 * dumpable when the euid changes) and the saved uid. */
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid)
	{
		current->mm->dumpable = suid_dumpable;
		wmb();	/* order the dumpable clear before the id writes */
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	/* Re-key the session keyring state to the new fsuid. */
	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
749
750
751 /*
752  * This function implements a generic ability to update ruid, euid,
753  * and suid.  This allows you to implement the 4.4 compatible seteuid().
754  */
/*
 * setresuid(2): set real, effective and saved uid independently; -1 leaves
 * a value unchanged.  Unprivileged callers may only use values from their
 * current real/effective/saved set.  fsuid follows euid.  Returns 0,
 * -EPERM, -EAGAIN (from set_user), or an LSM error.
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		/* set_user also clears dumpable when euid will change. */
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();	/* order the dumpable clear before id writes */
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	/* Re-key the session keyring state to the new fsuid. */
	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
797
798 asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
799 {
800         int retval;
801
802         if (!(retval = put_user(current->uid, ruid)) &&
803             !(retval = put_user(current->euid, euid)))
804                 retval = put_user(current->suid, suid);
805
806         return retval;
807 }
808
809 /*
810  * Same as above, but for rgid, egid, sgid.
811  */
/*
 * setresgid(2): set real, effective and saved gid independently; -1 leaves
 * a value unchanged.  Unprivileged callers may only use values from their
 * current real/effective/saved set.  fsgid follows egid.  Returns 0,
 * -EPERM, or an LSM error.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();	/* order the dumpable clear before id writes */
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	/* Re-key the session keyring state to the new fsgid. */
	key_fsgid_changed(current);
	return 0;
}
848
849 asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
850 {
851         int retval;
852
853         if (!(retval = put_user(current->gid, rgid)) &&
854             !(retval = put_user(current->egid, egid)))
855                 retval = put_user(current->sgid, sgid);
856
857         return retval;
858 }
859
860
861 /*
862  * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
863  * is used for "access()" and for the NFS daemon (letting nfsd stay at
864  * whatever uid it wants to). It normally shadows "euid", except when
865  * explicitly set by setfsuid() or for access..
866  */
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 *
 * Always returns the PREVIOUS fsuid (there is no error return — an
 * unpermitted @uid simply leaves fsuid unchanged).
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	/* LSM veto: report the old value, change nothing. */
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	/* fsuid may be set to any of the current uids, or anything with
	 * CAP_SETUID. */
	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid || 
	    capable(CAP_SETUID))
	{
		if (uid != old_fsuid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();	/* order the dumpable clear before the write */
		}
		current->fsuid = uid;
	}

	/* Re-key the session keyring state to the (possibly new) fsuid. */
	key_fsuid_changed(current);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}
893
894 /*
895  * Samma pÃ¥ svenska..
896  */
/*
 * Samma pÃ¥ svenska..
 * ("Same in Swedish": the gid counterpart of setfsuid above.)
 *
 * Always returns the PREVIOUS fsgid; an unpermitted @gid leaves the
 * fsgid unchanged.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	/* LSM veto: report the old value, change nothing. */
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	/* fsgid may be set to any of the current gids, or anything with
	 * CAP_SETGID. */
	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid || 
	    capable(CAP_SETGID))
	{
		if (gid != old_fsgid)
		{
			current->mm->dumpable = suid_dumpable;
			wmb();	/* order the dumpable clear before the write */
		}
		current->fsgid = gid;
		/* NOTE(review): unlike setfsuid, the key re-key happens only
		 * on the permitted path here — matches mainline, kept as-is. */
		key_fsgid_changed(current);
	}
	return old_fsgid;
}
919
/*
 * times(2): fill *tbuf with the thread group's accumulated user/system
 * times plus the reaped children's times, all in clock ticks, and
 * return the current time as ticks (jiffies_64 converted).
 */
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually its
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		unsigned long utime, stime, cutime, cstime;

		/* signal->[us]time holds the totals of already-dead threads;
		 * walk the live threads to add their per-task counters. */
		read_lock(&tasklist_lock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime += t->utime;
			stime += t->stime;
			t = next_thread(t);
		} while (t != tsk);

		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time.  Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);

		/* Convert internal jiffies to the user-visible tick unit. */
		tmp.tms_utime = jiffies_to_clock_t(utime);
		tmp.tms_stime = jiffies_to_clock_t(stime);
		tmp.tms_cutime = jiffies_to_clock_t(cutime);
		tmp.tms_cstime = jiffies_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
968
969 /*
970  * This needs some heavy checking ...
971  * I just haven't the stomach for it. I also don't fully
972  * understand sessions/pgrp etc. Let somebody who does explain it.
973  *
974  * OK, I think I have the protection semantics right.... this is really
975  * only important on a multi-user system anyway, to make sure one user
976  * can't send a signal to a process owned by another.  -TYT, 12/12/91
977  *
978  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
979  * LBT 04.03.94
980  */
981
/*
 * setpgid(2): move process @pid (0 = caller) into process group @pgid
 * (0 = same as pid, i.e. become a group leader).  The target must be
 * the caller or a not-yet-exec'd child in the caller's session.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	int err = -EINVAL;
	pid_t rpgid;

	/* vx_map_pid/vx_rmap_pid translate between guest-visible and
	 * kernel pids (vserver patch; presumably identity outside a
	 * guest context -- helpers defined elsewhere). */
	if (!pid)
		pid = vx_map_pid(current->pid);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	rpgid = vx_rmap_pid(pgid);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	/* Only thread group leaders carry process-group membership. */
	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	/* A child target must still be in our session and must not have
	 * exec'd; anything that is neither child nor self is ESRCH. */
	if (p->parent == current || p->real_parent == current) {
		err = -EPERM;
		if (p->signal->session != current->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != current)
			goto out;
	}

	/* A session leader may not change its process group. */
	err = -EPERM;
	if (p->signal->leader)
		goto out;

	/* Joining an existing group: it must contain a member of our
	 * session.  (The inner 'p' deliberately shadows the outer one
	 * for the duration of the scan only.) */
	if (pgid != pid) {
		struct task_struct *p;

		do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
			if (p->signal->session == current->signal->session)
				goto ok_pgid;
		} while_each_task_pid(rpgid, PIDTYPE_PGID, p);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, rpgid);
	if (err)
		goto out;

	/* Re-hash the task under its new process group id. */
	if (process_group(p) != rpgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = rpgid;
		attach_pid(p, PIDTYPE_PGID, rpgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
1055
/*
 * getpgid(2): return the process group of @pid (0 = caller), remapped
 * through the vserver pid translation.  -ESRCH if no such task.
 */
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		/* Own pgrp: no locking needed, our own field is stable. */
		return vx_rmap_pid(process_group(current));
	} else {
		int retval;
		struct task_struct *p;

		/* tasklist_lock keeps *p from being released under us. */
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			/* LSM may veto; 0 from the hook means allowed. */
			retval = security_task_getpgid(p);
			if (!retval)
				retval = vx_rmap_pid(process_group(p));
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
1077
1078 #ifdef __ARCH_WANT_SYS_GETPGRP
1079
/* getpgrp(2): the no-argument form of getpgid(0); never fails. */
asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}
1085
1086 #endif
1087
1088 asmlinkage long sys_getsid(pid_t pid)
1089 {
1090         if (!pid) {
1091                 return current->signal->session;
1092         } else {
1093                 int retval;
1094                 struct task_struct *p;
1095
1096                 read_lock(&tasklist_lock);
1097                 p = find_task_by_pid(pid);
1098
1099                 retval = -ESRCH;
1100                 if(p) {
1101                         retval = security_task_getsid(p);
1102                         if (!retval)
1103                                 retval = p->signal->session;
1104                 }
1105                 read_unlock(&tasklist_lock);
1106                 return retval;
1107         }
1108 }
1109
/*
 * setsid(2): make the calling thread-group leader the leader of a new
 * session and new process group, detaching it from any controlling
 * terminal.  Returns the new process group id, or -EPERM if the caller
 * is already a process group leader.
 */
asmlinkage long sys_setsid(void)
{
	struct pid *pid;
	int err = -EPERM;

	/* Only thread group leaders may create sessions. */
	if (!thread_group_leader(current))
		return -EINVAL;

	/* tty_sem serializes against controlling-tty changes;
	 * tasklist_lock protects the pid-hash rework below. */
	down(&tty_sem);
	write_lock_irq(&tasklist_lock);

	/* If some group already uses our pid as its pgid, we are a
	 * process group leader and must not start a new session. */
	pid = find_pid(PIDTYPE_PGID, current->pid);
	if (pid)
		goto out;

	current->signal->leader = 1;
	__set_special_pids(current->pid, current->pid);
	/* A fresh session has no controlling terminal. */
	current->signal->tty = NULL;
	current->signal->tty_old_pgrp = 0;
	err = process_group(current);
out:
	write_unlock_irq(&tasklist_lock);
	up(&tty_sem);
	return err;
}
1135
1136 /*
1137  * Supplementary group IDs
1138  */
1139
/* init to 2 - one for init_task, one to ensure it is never freed
 * (an extra permanent reference keeps put_group_info from kfree'ing it) */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
1142
/*
 * Allocate a group_info able to hold @gidsetsize gids, refcount 1.
 * Sets up to NGROUPS_SMALL use the embedded small_block array; larger
 * sets get one page per NGROUPS_PER_BLOCK gids.  The gid slots are
 * left uninitialized.  Returns NULL on allocation failure.
 */
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		/* Small set: point block 0 at the embedded array. */
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* i is the index of the failed allocation; free the earlier pages. */
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}
1179
1180 EXPORT_SYMBOL(groups_alloc);
1181
1182 void groups_free(struct group_info *group_info)
1183 {
1184         if (group_info->blocks[0] != group_info->small_block) {
1185                 int i;
1186                 for (i = 0; i < group_info->nblocks; i++)
1187                         free_page((unsigned long)group_info->blocks[i]);
1188         }
1189         kfree(group_info);
1190 }
1191
1192 EXPORT_SYMBOL(groups_free);
1193
1194 /* export the group_info to a user-space array */
1195 static int groups_to_user(gid_t __user *grouplist,
1196     struct group_info *group_info)
1197 {
1198         int i;
1199         int count = group_info->ngroups;
1200
1201         for (i = 0; i < group_info->nblocks; i++) {
1202                 int cp_count = min(NGROUPS_PER_BLOCK, count);
1203                 int off = i * NGROUPS_PER_BLOCK;
1204                 int len = cp_count * sizeof(*grouplist);
1205
1206                 if (copy_to_user(grouplist+off, group_info->blocks[i], len))
1207                         return -EFAULT;
1208
1209                 count -= cp_count;
1210         }
1211         return 0;
1212 }
1213
1214 /* fill a group_info from a user-space array - it must be allocated already */
1215 static int groups_from_user(struct group_info *group_info,
1216     gid_t __user *grouplist)
1217  {
1218         int i;
1219         int count = group_info->ngroups;
1220
1221         for (i = 0; i < group_info->nblocks; i++) {
1222                 int cp_count = min(NGROUPS_PER_BLOCK, count);
1223                 int off = i * NGROUPS_PER_BLOCK;
1224                 int len = cp_count * sizeof(*grouplist);
1225
1226                 if (copy_from_user(group_info->blocks[i], grouplist+off, len))
1227                         return -EFAULT;
1228
1229                 count -= cp_count;
1230         }
1231         return 0;
1232 }
1233
1234 /* a simple shell-metzner sort */
1235 static void groups_sort(struct group_info *group_info)
1236 {
1237         int base, max, stride;
1238         int gidsetsize = group_info->ngroups;
1239
1240         for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
1241                 ; /* nothing */
1242         stride /= 3;
1243
1244         while (stride) {
1245                 max = gidsetsize - stride;
1246                 for (base = 0; base < max; base++) {
1247                         int left = base;
1248                         int right = left + stride;
1249                         gid_t tmp = GROUP_AT(group_info, right);
1250
1251                         while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
1252                                 GROUP_AT(group_info, right) =
1253                                     GROUP_AT(group_info, left);
1254                                 right = left;
1255                                 left -= stride;
1256                         }
1257                         GROUP_AT(group_info, right) = tmp;
1258                 }
1259                 stride /= 3;
1260         }
1261 }
1262
1263 /* a simple bsearch */
1264 static int groups_search(struct group_info *group_info, gid_t grp)
1265 {
1266         int left, right;
1267
1268         if (!group_info)
1269                 return 0;
1270
1271         left = 0;
1272         right = group_info->ngroups;
1273         while (left < right) {
1274                 int mid = (left+right)/2;
1275                 int cmp = grp - GROUP_AT(group_info, mid);
1276                 if (cmp > 0)
1277                         left = mid + 1;
1278                 else if (cmp < 0)
1279                         right = mid;
1280                 else
1281                         return 1;
1282         }
1283         return 0;
1284 }
1285
/* validate and set current->group_info */
/*
 * Install @group_info as the caller's supplementary group set after
 * the LSM approves it.  Sorts the set in place (required by
 * groups_search), takes a reference on it, and drops the reference on
 * the previous set.  Returns 0 or the LSM's error.
 */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	/* Keep the list sorted so groups_search() can bsearch it. */
	groups_sort(group_info);
	get_group_info(group_info);

	/* task_lock makes the pointer swap atomic for lock-taking
	 * readers; only then is the old set released. */
	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}
1308
1309 EXPORT_SYMBOL(set_current_groups);
1310
/*
 * getgroups(2): with gidsetsize == 0, return the number of
 * supplementary groups without copying; otherwise copy them out and
 * return the count, or -EINVAL if the buffer is too small.
 */
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	/* Balance the reference taken above. */
	put_group_info(current->group_info);
	return i;
}
1340
1341 /*
1342  *      SMP: Our groups are copy-on-write. We can set them safely
1343  *      without another task interfering.
1344  */
1345  
1346 asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
1347 {
1348         struct group_info *group_info;
1349         int retval;
1350
1351         if (!capable(CAP_SETGID))
1352                 return -EPERM;
1353         if ((unsigned)gidsetsize > NGROUPS_MAX)
1354                 return -EINVAL;
1355
1356         group_info = groups_alloc(gidsetsize);
1357         if (!group_info)
1358                 return -ENOMEM;
1359         retval = groups_from_user(group_info, grouplist);
1360         if (retval) {
1361                 put_group_info(group_info);
1362                 return retval;
1363         }
1364
1365         retval = set_current_groups(group_info);
1366         put_group_info(group_info);
1367
1368         return retval;
1369 }
1370
1371 /*
1372  * Check whether we're fsgid/egid or in the supplemental group..
1373  */
1374 int in_group_p(gid_t grp)
1375 {
1376         int retval = 1;
1377         if (grp != current->fsgid) {
1378                 get_group_info(current->group_info);
1379                 retval = groups_search(current->group_info, grp);
1380                 put_group_info(current->group_info);
1381         }
1382         return retval;
1383 }
1384
1385 EXPORT_SYMBOL(in_group_p);
1386
1387 int in_egroup_p(gid_t grp)
1388 {
1389         int retval = 1;
1390         if (grp != current->egid) {
1391                 get_group_info(current->group_info);
1392                 retval = groups_search(current->group_info, grp);
1393                 put_group_info(current->group_info);
1394         }
1395         return retval;
1396 }
1397
1398 EXPORT_SYMBOL(in_egroup_p);
1399
1400 DECLARE_RWSEM(uts_sem);
1401
1402 EXPORT_SYMBOL(uts_sem);
1403
1404 asmlinkage long sys_newuname(struct new_utsname __user * name)
1405 {
1406         int errno = 0;
1407
1408         down_read(&uts_sem);
1409         if (copy_to_user(name, vx_new_utsname(), sizeof *name))
1410                 errno = -EFAULT;
1411         up_read(&uts_sem);
1412         return errno;
1413 }
1414
/*
 * sethostname(2): set the utsname nodename.  Requires CAP_SYS_ADMIN,
 * or inside a vserver guest the VXC_SET_UTSNAME context capability
 * (vx_* helpers are defined elsewhere).
 */
asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	/* Bounce through a kernel buffer so a user-space fault cannot
	 * leave the live name half-written. */
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(nodename);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;	/* always NUL-terminate the stored name */
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
1436
1437 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1438
/*
 * gethostname(2): copy the nodename (including its NUL, truncated to
 * @len bytes if the buffer is smaller) to user space.
 */
asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;
	char *ptr;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	ptr = vx_new_uts(nodename);
	i = 1 + strlen(ptr);	/* include the terminating NUL */
	if (i > len)
		i = len;	/* silently truncate to the user buffer */
	errno = 0;
	if (copy_to_user(name, ptr, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
1457
1458 #endif
1459
1460 /*
1461  * Only setdomainname; getdomainname can be implemented by calling
1462  * uname()
1463  */
/*
 * setdomainname(2): set the utsname domainname; same permission rule
 * and bounce-buffer pattern as sys_sethostname() above.
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN) && !vx_ccaps(VXC_SET_UTSNAME))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		char *ptr = vx_new_uts(domainname);

		memcpy(ptr, tmp, len);
		ptr[len] = 0;	/* always NUL-terminate */
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
1486
1487 asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1488 {
1489         if (resource >= RLIM_NLIMITS)
1490                 return -EINVAL;
1491         else {
1492                 struct rlimit value;
1493                 task_lock(current->group_leader);
1494                 value = current->signal->rlim[resource];
1495                 task_unlock(current->group_leader);
1496                 return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1497         }
1498 }
1499
1500 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1501
1502 /*
1503  *      Back compatibility for getrlimit. Needed for some apps.
1504  */
1505  
1506 asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1507 {
1508         struct rlimit x;
1509         if (resource >= RLIM_NLIMITS)
1510                 return -EINVAL;
1511
1512         task_lock(current->group_leader);
1513         x = current->signal->rlim[resource];
1514         task_unlock(current->group_leader);
1515         if(x.rlim_cur > 0x7FFFFFFF)
1516                 x.rlim_cur = 0x7FFFFFFF;
1517         if(x.rlim_max > 0x7FFFFFFF)
1518                 x.rlim_max = 0x7FFFFFFF;
1519         return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1520 }
1521
1522 #endif
1523
/*
 * setrlimit(2): install a new limit pair.  Raising the hard limit
 * needs CAP_SYS_RESOURCE (or the vserver VXC_SET_RLIMIT context
 * capability); lowering it is always allowed.
 */
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if(copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
       if (new_rlim.rlim_cur > new_rlim.rlim_max)
	       return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE) && !vx_ccaps(VXC_SET_RLIMIT))
		return -EPERM;
	/* NOFILE is additionally capped by the fd-table limit NR_OPEN. */
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
			return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	/* group leader's task_lock protects signal->rlim updates, giving
	 * readers a consistent cur/max pair. */
	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);
	return 0;
}
1551
1552 /*
1553  * It would make sense to put struct rusage in the task_struct,
1554  * except that would make the task_struct be *really big*.  After
1555  * task_struct gets moved into malloc'ed memory, it would
1556  * make sense to do this.  It will make moving the rest of the information
1557  * a lot simpler!  (Which we're not doing right now because we're not
1558  * measuring them yet).
1559  *
1560  * This expects to be called with tasklist_lock read-locked or better,
1561  * and the siglock not locked.  It may momentarily take the siglock.
1562  *
1563  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1564  * races with threads incrementing their own counters.  But since word
1565  * reads are atomic, we either get new values or old values and we don't
1566  * care which for the sums.  We always take the siglock to protect reading
1567  * the c* fields from p->signal from races with exit.c updating those
1568  * fields when reaping, so a sample either gets all the additions of a
1569  * given child after it's reaped, or none so this sample is before reaping.
1570  */
1571
/*
 * Accumulate rusage for @p into *r.  RUSAGE_CHILDREN reports only the
 * reaped-children totals; RUSAGE_SELF sums the thread group;
 * RUSAGE_BOTH seeds with the children totals and then falls into the
 * same thread-group summation (the goto into the switch).  See the
 * locking commentary above the function.
 */
void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	unsigned long utime, stime;

	memset((char *) r, 0, sizeof *r);

	/* Task already released its signal_struct: report all zeroes. */
	if (unlikely(!p->signal))
		return;

	switch (who) {
		case RUSAGE_CHILDREN:
			/* siglock keeps the c* fields mutually consistent
			 * against concurrent wait()-side updates. */
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			jiffies_to_timeval(utime, &r->ru_utime);
			jiffies_to_timeval(stime, &r->ru_stime);
			break;
		case RUSAGE_SELF:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = stime = 0;
			goto sum_group;	/* skip the children seeding */
		case RUSAGE_BOTH:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
		sum_group:
			/* signal-> totals cover already-dead threads; the
			 * loop below adds each live thread's counters. */
			utime += p->signal->utime;
			stime += p->signal->stime;
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime += t->utime;
				stime += t->stime;
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			jiffies_to_timeval(utime, &r->ru_utime);
			jiffies_to_timeval(stime, &r->ru_stime);
			break;
		default:
			BUG();
	}
}
1633
1634 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1635 {
1636         struct rusage r;
1637         read_lock(&tasklist_lock);
1638         k_getrusage(p, who, &r);
1639         read_unlock(&tasklist_lock);
1640         return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1641 }
1642
1643 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
1644 {
1645         if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1646                 return -EINVAL;
1647         return getrusage(current, who, ru);
1648 }
1649
/*
 * umask(2): atomically swap in the new mask (restricted to the
 * rwxrwxrwx permission bits) and return the previous one.
 */
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
1655     
1656 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1657                           unsigned long arg4, unsigned long arg5)
1658 {
1659         long error;
1660         int sig;
1661
1662         error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1663         if (error)
1664                 return error;
1665
1666         switch (option) {
1667                 case PR_SET_PDEATHSIG:
1668                         sig = arg2;
1669                         if (sig < 0 || sig > _NSIG) {
1670                                 error = -EINVAL;
1671                                 break;
1672                         }
1673                         current->pdeath_signal = sig;
1674                         break;
1675                 case PR_GET_PDEATHSIG:
1676                         error = put_user(current->pdeath_signal, (int __user *)arg2);
1677                         break;
1678                 case PR_GET_DUMPABLE:
1679                         if (current->mm->dumpable)
1680                                 error = 1;
1681                         break;
1682                 case PR_SET_DUMPABLE:
1683                         if (arg2 < 0 && arg2 > 2) {
1684                                 error = -EINVAL;
1685                                 break;
1686                         }
1687                         current->mm->dumpable = arg2;
1688                         break;
1689
1690                 case PR_SET_UNALIGN:
1691                         error = SET_UNALIGN_CTL(current, arg2);
1692                         break;
1693                 case PR_GET_UNALIGN:
1694                         error = GET_UNALIGN_CTL(current, arg2);
1695                         break;
1696                 case PR_SET_FPEMU:
1697                         error = SET_FPEMU_CTL(current, arg2);
1698                         break;
1699                 case PR_GET_FPEMU:
1700                         error = GET_FPEMU_CTL(current, arg2);
1701                         break;
1702                 case PR_SET_FPEXC:
1703                         error = SET_FPEXC_CTL(current, arg2);
1704                         break;
1705                 case PR_GET_FPEXC:
1706                         error = GET_FPEXC_CTL(current, arg2);
1707                         break;
1708                 case PR_GET_TIMING:
1709                         error = PR_TIMING_STATISTICAL;
1710                         break;
1711                 case PR_SET_TIMING:
1712                         if (arg2 == PR_TIMING_STATISTICAL)
1713                                 error = 0;
1714                         else
1715                                 error = -EINVAL;
1716                         break;
1717
1718                 case PR_GET_KEEPCAPS:
1719                         if (current->keep_capabilities)
1720                                 error = 1;
1721                         break;
1722                 case PR_SET_KEEPCAPS:
1723                         if (arg2 != 0 && arg2 != 1) {
1724                                 error = -EINVAL;
1725                                 break;
1726                         }
1727                         current->keep_capabilities = arg2;
1728                         break;
1729                 case PR_SET_NAME: {
1730                         struct task_struct *me = current;
1731                         unsigned char ncomm[sizeof(me->comm)];
1732
1733                         ncomm[sizeof(me->comm)-1] = 0;
1734                         if (strncpy_from_user(ncomm, (char __user *)arg2,
1735                                                 sizeof(me->comm)-1) < 0)
1736                                 return -EFAULT;
1737                         set_task_comm(me, ncomm);
1738                         return 0;
1739                 }
1740                 default:
1741                         error = -EINVAL;
1742                         break;
1743         }
1744         return error;
1745 }