vserver 1.9.3

diff --git a/kernel/sys.c b/kernel/sys.c
index e7a076d..1ba0e4c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -277,7 +277,9 @@ cond_syscall(compat_sys_mq_getsetattr)
 cond_syscall(sys_mbind)
 cond_syscall(sys_get_mempolicy)
 cond_syscall(sys_set_mempolicy)
+cond_syscall(compat_mbind)
 cond_syscall(compat_get_mempolicy)
+cond_syscall(compat_set_mempolicy)
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read)
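The two added lines cover the 32-bit compat entry points of the NUMA memory-policy syscalls (compat_get_mempolicy was already listed), so configurations that do not build that code still link and the calls fall back to -ENOSYS. For reference, cond_syscall() is defined earlier in this file as, roughly, a weak alias to sys_ni_syscall; the sketch below is an approximation of the mechanism, not the verbatim macro:

    /* Approximate shape of cond_syscall(): if no real implementation is
     * linked in, the weak alias makes the symbol resolve to sys_ni_syscall(),
     * which simply returns -ENOSYS. */
    #define cond_syscall(x) \
            asmlinkage long x(void) __attribute__((weak, alias("sys_ni_syscall")));
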
@@ -294,7 +296,10 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
                goto out;
        }
        if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
-               error = -EACCES;
+               if (vx_flags(VXF_IGNEG_NICE, 0))
+                       error = 0;
+               else
+                       error = -EACCES;
                goto out;
        }
        no_nice = security_task_setnice(p, niceval);
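The vserver hook changes the failure mode, not the permission model: with VXF_IGNEG_NICE set on the context, a request to raise priority without CAP_SYS_NICE is reported as success but still takes the goto, skipping set_user_nice(), so the nice value stays as it was. A minimal userspace illustration (assumes the caller runs inside a guest context carrying that flag):

    #include <sys/resource.h>
    #include <stdio.h>

    int main(void)
    {
            int before = getpriority(PRIO_PROCESS, 0);
            int rc = setpriority(PRIO_PROCESS, 0, before - 5);  /* ask for more priority */
            int after = getpriority(PRIO_PROCESS, 0);

            /* with VXF_IGNEG_NICE: rc == 0 but after == before;
             * without it (and without CAP_SYS_NICE): rc == -1, errno == EACCES */
            printf("rc=%d before=%d after=%d\n", rc, before, after);
            return 0;
    }
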
@@ -313,8 +318,6 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
 {
        struct task_struct *g, *p;
        struct user_struct *user;
-       struct pid *pid;
-       struct list_head *l;
        int error = -EINVAL;
 
        if (which > 2 || which < 0)
@@ -339,8 +342,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
                case PRIO_PGRP:
                        if (!who)
                                who = process_group(current);
-                       for_each_task_pid(who, PIDTYPE_PGID, p, l, pid)
+                       do_each_task_pid(who, PIDTYPE_PGID, p) {
                                error = set_one_prio(p, niceval, error);
+                       } while_each_task_pid(who, PIDTYPE_PGID, p);
                        break;
                case PRIO_USER:
                        if (!who)
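The old for_each_task_pid() iterator required the caller to supply struct pid and list_head cursors, which is why those locals are deleted from sys_setpriority() and sys_getpriority(); the do_each_task_pid()/while_each_task_pid() pair keeps that state internal. A hedged sketch of the new idiom (count_pgrp_members is a hypothetical helper, not part of this patch; it relies on the same tasklist_lock the surrounding code already takes):

    /* Walk every task attached to a process group and count them. */
    static int count_pgrp_members(pid_t pgrp)
    {
            struct task_struct *p;
            int n = 0;

            read_lock(&tasklist_lock);
            do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
                    n++;
            } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
            read_unlock(&tasklist_lock);
            return n;
    }
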
@@ -374,8 +378,6 @@ out:
 asmlinkage long sys_getpriority(int which, int who)
 {
        struct task_struct *g, *p;
-       struct list_head *l;
-       struct pid *pid;
        struct user_struct *user;
        long niceval, retval = -ESRCH;
 
@@ -397,11 +399,11 @@ asmlinkage long sys_getpriority(int which, int who)
                case PRIO_PGRP:
                        if (!who)
                                who = process_group(current);
-                       for_each_task_pid(who, PIDTYPE_PGID, p, l, pid) {
+                       do_each_task_pid(who, PIDTYPE_PGID, p) {
                                niceval = 20 - task_nice(p);
                                if (niceval > retval)
                                        retval = niceval;
-                       }
+                       } while_each_task_pid(who, PIDTYPE_PGID, p);
                        break;
                case PRIO_USER:
                        if (!who)
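The 20 - task_nice(p) encoding is the existing sys_getpriority() convention: the raw syscall returns a value in 1..40 so that a successful result is never negative and cannot be confused with an error, and the C library converts it back to a nice value. From userspace nothing changes apart from the iteration fix:

    #include <sys/resource.h>
    #include <stdio.h>

    int main(void)
    {
            /* glibc undoes the kernel's 20-nice encoding before returning */
            printf("process-group nice = %d\n", getpriority(PRIO_PGRP, 0));
            return 0;
    }
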
@@ -954,10 +956,39 @@ asmlinkage long sys_times(struct tms __user * tbuf)
         */
        if (tbuf) {
                struct tms tmp;
-               tmp.tms_utime = jiffies_to_clock_t(current->utime);
-               tmp.tms_stime = jiffies_to_clock_t(current->stime);
-               tmp.tms_cutime = jiffies_to_clock_t(current->cutime);
-               tmp.tms_cstime = jiffies_to_clock_t(current->cstime);
+               struct task_struct *tsk = current;
+               struct task_struct *t;
+               unsigned long utime, stime, cutime, cstime;
+
+               read_lock(&tasklist_lock);
+               utime = tsk->signal->utime;
+               stime = tsk->signal->stime;
+               t = tsk;
+               do {
+                       utime += t->utime;
+                       stime += t->stime;
+                       t = next_thread(t);
+               } while (t != tsk);
+
+               /*
+                * While we have tasklist_lock read-locked, no dying thread
+                * can be updating current->signal->[us]time.  Instead,
+                * we got their counts included in the live thread loop.
+                * However, another thread can come in right now and
+                * do a wait call that updates current->signal->c[us]time.
+                * To make sure we always see that pair updated atomically,
+                * we take the siglock around fetching them.
+                */
+               spin_lock_irq(&tsk->sighand->siglock);
+               cutime = tsk->signal->cutime;
+               cstime = tsk->signal->cstime;
+               spin_unlock_irq(&tsk->sighand->siglock);
+               read_unlock(&tasklist_lock);
+
+               tmp.tms_utime = jiffies_to_clock_t(utime);
+               tmp.tms_stime = jiffies_to_clock_t(stime);
+               tmp.tms_cutime = jiffies_to_clock_t(cutime);
+               tmp.tms_cstime = jiffies_to_clock_t(cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
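sys_times() now reports the whole thread group: signal->utime/stime accumulate the times of threads that have already exited, and the next_thread() loop adds the live ones, with the siglock taken only around the c[us]time pair as the comment explains. A minimal userspace check of the new semantics (results are in clock ticks, scaled by sysconf(_SC_CLK_TCK)):

    #include <sys/times.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            struct tms t;
            long tck = sysconf(_SC_CLK_TCK);

            times(&t);
            printf("user %.2fs system %.2fs (all threads of this process)\n",
                   (double)t.tms_utime / tck, (double)t.tms_stime / tck);
            return 0;
    }
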
@@ -981,14 +1012,17 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 {
        struct task_struct *p;
        int err = -EINVAL;
+       pid_t rpgid;
 
        if (!pid)
-               pid = current->pid;
+               pid = vx_map_pid(current->pid);
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
 
+       rpgid = vx_rmap_pid(pgid);
+
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
@@ -1022,24 +1056,23 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 
        if (pgid != pid) {
                struct task_struct *p;
-               struct pid *pid;
-               struct list_head *l;
 
-               for_each_task_pid(pgid, PIDTYPE_PGID, p, l, pid)
+               do_each_task_pid(rpgid, PIDTYPE_PGID, p) {
                        if (p->signal->session == current->signal->session)
                                goto ok_pgid;
+               } while_each_task_pid(rpgid, PIDTYPE_PGID, p);
                goto out;
        }
 
 ok_pgid:
-       err = security_task_setpgid(p, pgid);
+       err = security_task_setpgid(p, rpgid);
        if (err)
                goto out;
 
-       if (process_group(p) != pgid) {
+       if (process_group(p) != rpgid) {
                detach_pid(p, PIDTYPE_PGID);
-               p->signal->pgrp = pgid;
-               attach_pid(p, PIDTYPE_PGID, pgid);
+               p->signal->pgrp = rpgid;
+               attach_pid(p, PIDTYPE_PGID, rpgid);
        }
 
        err = 0;
@@ -1052,7 +1085,7 @@ out:
 asmlinkage long sys_getpgid(pid_t pid)
 {
        if (!pid) {
-               return process_group(current);
+               return vx_rmap_pid(process_group(current));
        } else {
                int retval;
                struct task_struct *p;
@@ -1064,7 +1097,7 @@ asmlinkage long sys_getpgid(pid_t pid)
                if (p) {
                        retval = security_task_getpgid(p);
                        if (!retval)
-                               retval = process_group(p);
+                               retval = vx_rmap_pid(process_group(p));
                }
                read_unlock(&tasklist_lock);
                return retval;
@@ -1546,44 +1579,86 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
  * a lot simpler!  (Which we're not doing right now because we're not
  * measuring them yet).
  *
- * This is SMP safe.  Either we are called from sys_getrusage on ourselves
- * below (we know we aren't going to exit/disappear and only we change our
- * rusage counters), or we are called from wait4() on a process which is
- * either stopped or zombied.  In the zombied case the task won't get
- * reaped till shortly after the call to getrusage(), in both cases the
- * task being examined is in a frozen state so the counters won't change.
+ * This expects to be called with tasklist_lock read-locked or better,
+ * and the siglock not locked.  It may momentarily take the siglock.
+ *
+ * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
+ * races with threads incrementing their own counters.  But since word
+ * reads are atomic, we either get new values or old values and we don't
+ * care which for the sums.  We always take the siglock to protect reading
+ * the c* fields from p->signal from races with exit.c updating those
+ * fields when reaping, so a sample either gets all the additions of a
+ * given child after it's reaped, or none so this sample is before reaping.
  */
-int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
+
+void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
-       struct rusage r;
+       struct task_struct *t;
+       unsigned long flags;
+       unsigned long utime, stime;
+
+       memset((char *) r, 0, sizeof *r);
+
+       if (unlikely(!p->signal))
+               return;
 
-       memset((char *) &r, 0, sizeof(r));
        switch (who) {
-               case RUSAGE_SELF:
-                       jiffies_to_timeval(p->utime, &r.ru_utime);
-                       jiffies_to_timeval(p->stime, &r.ru_stime);
-                       r.ru_nvcsw = p->nvcsw;
-                       r.ru_nivcsw = p->nivcsw;
-                       r.ru_minflt = p->min_flt;
-                       r.ru_majflt = p->maj_flt;
-                       break;
                case RUSAGE_CHILDREN:
-                       jiffies_to_timeval(p->cutime, &r.ru_utime);
-                       jiffies_to_timeval(p->cstime, &r.ru_stime);
-                       r.ru_nvcsw = p->cnvcsw;
-                       r.ru_nivcsw = p->cnivcsw;
-                       r.ru_minflt = p->cmin_flt;
-                       r.ru_majflt = p->cmaj_flt;
+                       spin_lock_irqsave(&p->sighand->siglock, flags);
+                       utime = p->signal->cutime;
+                       stime = p->signal->cstime;
+                       r->ru_nvcsw = p->signal->cnvcsw;
+                       r->ru_nivcsw = p->signal->cnivcsw;
+                       r->ru_minflt = p->signal->cmin_flt;
+                       r->ru_majflt = p->signal->cmaj_flt;
+                       spin_unlock_irqrestore(&p->sighand->siglock, flags);
+                       jiffies_to_timeval(utime, &r->ru_utime);
+                       jiffies_to_timeval(stime, &r->ru_stime);
                        break;
-               default:
-                       jiffies_to_timeval(p->utime + p->cutime, &r.ru_utime);
-                       jiffies_to_timeval(p->stime + p->cstime, &r.ru_stime);
-                       r.ru_nvcsw = p->nvcsw + p->cnvcsw;
-                       r.ru_nivcsw = p->nivcsw + p->cnivcsw;
-                       r.ru_minflt = p->min_flt + p->cmin_flt;
-                       r.ru_majflt = p->maj_flt + p->cmaj_flt;
+               case RUSAGE_SELF:
+                       spin_lock_irqsave(&p->sighand->siglock, flags);
+                       utime = stime = 0;
+                       goto sum_group;
+               case RUSAGE_BOTH:
+                       spin_lock_irqsave(&p->sighand->siglock, flags);
+                       utime = p->signal->cutime;
+                       stime = p->signal->cstime;
+                       r->ru_nvcsw = p->signal->cnvcsw;
+                       r->ru_nivcsw = p->signal->cnivcsw;
+                       r->ru_minflt = p->signal->cmin_flt;
+                       r->ru_majflt = p->signal->cmaj_flt;
+               sum_group:
+                       utime += p->signal->utime;
+                       stime += p->signal->stime;
+                       r->ru_nvcsw += p->signal->nvcsw;
+                       r->ru_nivcsw += p->signal->nivcsw;
+                       r->ru_minflt += p->signal->min_flt;
+                       r->ru_majflt += p->signal->maj_flt;
+                       t = p;
+                       do {
+                               utime += t->utime;
+                               stime += t->stime;
+                               r->ru_nvcsw += t->nvcsw;
+                               r->ru_nivcsw += t->nivcsw;
+                               r->ru_minflt += t->min_flt;
+                               r->ru_majflt += t->maj_flt;
+                               t = next_thread(t);
+                       } while (t != p);
+                       spin_unlock_irqrestore(&p->sighand->siglock, flags);
+                       jiffies_to_timeval(utime, &r->ru_utime);
+                       jiffies_to_timeval(stime, &r->ru_stime);
                        break;
+               default:
+                       BUG();
        }
+}
+
+int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
+{
+       struct rusage r;
+       read_lock(&tasklist_lock);
+       k_getrusage(p, who, &r);
+       read_unlock(&tasklist_lock);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
 }
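The accounting now lives in k_getrusage(), which expects tasklist_lock to be held and may take the siglock, while getrusage() stays a thin wrapper that copies the result out. RUSAGE_SELF and the kernel-internal RUSAGE_BOTH (used by wait4(), not reachable through the syscall) sum over every thread in the group. The userspace call is unchanged; it simply reports process-wide figures now:

    #include <sys/resource.h>
    #include <stdio.h>

    int main(void)
    {
            struct rusage ru;

            if (getrusage(RUSAGE_SELF, &ru) == 0)   /* whole-thread-group sample */
                    printf("minflt=%ld majflt=%ld nvcsw=%ld nivcsw=%ld\n",
                           ru.ru_minflt, ru.ru_majflt, ru.ru_nvcsw, ru.ru_nivcsw);
            return 0;
    }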
 
@@ -1673,6 +1748,17 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                        }
                        current->keep_capabilities = arg2;
                        break;
+               case PR_SET_NAME: {
+                       struct task_struct *me = current;
+                       unsigned char ncomm[sizeof(me->comm)];
+
+                       ncomm[sizeof(me->comm)-1] = 0;
+                       if (strncpy_from_user(ncomm, (char __user *)arg2,
+                                               sizeof(me->comm)-1) < 0)
+                               return -EFAULT;
+                       set_task_comm(me, ncomm);
+                       return 0;
+               }
                default:
                        error = -EINVAL;
                        break;
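
PR_SET_NAME copies a NUL-terminated string from userspace into the caller's comm[] (16 bytes including the terminator, hence the sizeof(me->comm)-1 copy) and publishes it through set_task_comm(). Typical use is labelling a long-running process or thread so it shows up under that name in ps or /proc/<pid>/stat; the fallback #define below is only a convenience for userspace headers that predate this option (15 is the value the prctl ABI uses for it):

    #include <sys/prctl.h>
    #include <unistd.h>

    #ifndef PR_SET_NAME
    #define PR_SET_NAME 15
    #endif

    int main(void)
    {
            /* names longer than the comm buffer are silently truncated */
            prctl(PR_SET_NAME, (unsigned long)"my-worker", 0, 0, 0);
            sleep(60);      /* meanwhile: ps -o pid,comm  or  cat /proc/<pid>/stat */
            return 0;
    }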