fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] kernel/user.c
index 850b0ae..902de43 100644
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
-#define UIDHASH_BITS           8
+
+#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)
 #define UIDHASH_SZ             (1 << UIDHASH_BITS)
 #define UIDHASH_MASK           (UIDHASH_SZ - 1)
 #define __uidhashfn(xid,uid)   ((((uid) >> UIDHASH_BITS) + ((uid)^(xid))) & UIDHASH_MASK)
 #define uidhashentry(xid,uid)  (uidhash_table + __uidhashfn((xid),(uid)))
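(Aside, not part of the patch: a worked example of the hash, assuming the non-CONFIG_BASE_SMALL case where UIDHASH_BITS is 8 and UIDHASH_SZ is 256. For xid 0 and uid 1000:

	__uidhashfn(0, 1000) = ((1000 >> 8) + (1000 ^ 0)) & 255
	                     = (3 + 1000) & 255
	                     = 235

so that user_struct hangs off bucket 235 of uidhash_table. XOR-ing in the xid means the same uid in different vserver contexts tends to land in a different bucket.)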
 
-static kmem_cache_t *uid_cachep;
+static struct kmem_cache *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
+static DEFINE_SPINLOCK(uidhash_lock);
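(Reviewer's aside, not part of the patch: the comment above is why the patch uses the irqsave/irq spinlock variants rather than the _bh ones. A rough sketch of the hazard it describes, with a purely hypothetical caller:

	unsigned long flags;

	local_irq_save(flags);	/* caller legitimately has hard IRQs off */
	...
	free_uid(up);		/* if this used spin_unlock_bh(), the implied
				 * local_bh_enable() could run pending softirqs
				 * right here, with IRQs still disabled - the
				 * exact error the comment warns about */
	...
	local_irq_restore(flags);

Hence free_uid() below saves and restores the interrupt flags itself and never goes through the bh-enable path.)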
 
 struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
-       .mq_bytes       = 0
+       .mq_bytes       = 0,
+       .locked_shm     = 0,
+#ifdef CONFIG_KEYS
+       .uid_keyring    = &root_user_keyring,
+       .session_keyring = &root_session_keyring,
+#endif
 };
 
 /*
@@ -75,19 +93,30 @@ static inline struct user_struct *uid_hash_find(xid_t xid, uid_t uid, struct lis
 struct user_struct *find_user(xid_t xid, uid_t uid)
 {
        struct user_struct *ret;
+       unsigned long flags;
 
-       spin_lock(&uidhash_lock);
+       spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(xid, uid, uidhashentry(xid, uid));
-       spin_unlock(&uidhash_lock);
+       spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-       if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+       unsigned long flags;
+
+       if (!up)
+               return;
+
+       local_irq_save(flags);
+       if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
+               spin_unlock_irqrestore(&uidhash_lock, flags);
+               key_put(up->uid_keyring);
+               key_put(up->session_keyring);
                kmem_cache_free(uid_cachep, up);
-               spin_unlock(&uidhash_lock);
+       } else {
+               local_irq_restore(flags);
        }
 }
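(Aside, not part of the patch: free_uid() is the usual atomic_dec_and_lock() idiom - the reference count is dropped, and only when it reaches zero is the hash lock taken, so the object is unhashed and freed atomically with the last put. A minimal generic sketch of the same pattern, all names (my_obj, obj_hash_lock) hypothetical:

	void my_obj_put(struct my_obj *obj)
	{
		unsigned long flags;

		if (!obj)
			return;

		local_irq_save(flags);
		if (atomic_dec_and_lock(&obj->refcount, &obj_hash_lock)) {
			/* last reference: unhash under the lock, then free */
			list_del(&obj->hash_link);
			spin_unlock_irqrestore(&obj_hash_lock, flags);
			kfree(obj);
		} else {
			/* other references remain; the lock was never taken */
			local_irq_restore(flags);
		}
	}

Later kernels package the local_irq_save()/atomic_dec_and_lock() pairing as atomic_dec_and_lock_irqsave().)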
 
@@ -96,14 +125,14 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
        struct list_head *hashent = uidhashentry(xid, uid);
        struct user_struct *up;
 
-       spin_lock(&uidhash_lock);
+       spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(xid, uid, hashent);
-       spin_unlock(&uidhash_lock);
+       spin_unlock_irq(&uidhash_lock);
 
        if (!up) {
                struct user_struct *new;
 
-               new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+               new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
@@ -112,22 +141,34 @@ struct user_struct * alloc_uid(xid_t xid, uid_t uid)
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
+#ifdef CONFIG_INOTIFY_USER
+               atomic_set(&new->inotify_watches, 0);
+               atomic_set(&new->inotify_devs, 0);
+#endif
 
                new->mq_bytes = 0;
+               new->locked_shm = 0;
+
+               if (alloc_uid_keyring(new, current) < 0) {
+                       kmem_cache_free(uid_cachep, new);
+                       return NULL;
+               }
 
                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
-               spin_lock(&uidhash_lock);
+               spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(xid, uid, hashent);
                if (up) {
+                       key_put(new->uid_keyring);
+                       key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
-               spin_unlock(&uidhash_lock);
+               spin_unlock_irq(&uidhash_lock);
 
        }
        return up;
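(Aside, not part of the patch: alloc_uid() follows the standard "optimistic lookup, allocate outside the lock, re-check under the lock" shape, since kmem_cache_alloc(..., GFP_KERNEL) and alloc_uid_keyring() may sleep and therefore cannot run under the spinlock. A stripped-down generic sketch, all names hypothetical:

	struct my_obj *my_obj_get_or_create(int key)
	{
		struct my_obj *obj, *new;

		spin_lock_irq(&obj_hash_lock);
		obj = my_obj_find(key);			/* fast path */
		spin_unlock_irq(&obj_hash_lock);
		if (obj)
			return obj;

		new = kmalloc(sizeof(*new), GFP_KERNEL);	/* may sleep: no lock held */
		if (!new)
			return NULL;
		new->key = key;

		spin_lock_irq(&obj_hash_lock);
		obj = my_obj_find(key);			/* did another task race us? */
		if (obj) {
			kfree(new);			/* lost the race: discard our copy */
		} else {
			my_obj_insert(new);
			obj = new;
		}
		spin_unlock_irq(&obj_hash_lock);
		return obj;
	}

The real function returns with a reference held, and the loser of the race must also drop the keyrings it already allocated, which is what the added key_put() calls handle.)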
@@ -145,8 +186,21 @@ void switch_uid(struct user_struct *new_user)
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
+       switch_uid_keyring(new_user);
        current->user = new_user;
+
+       /*
+        * We need to synchronize with __sigqueue_alloc()
+        * doing a get_uid(p->user).. If that saw the old
+        * user value, we need to wait until it has exited
+        * its critical region before we can free the old
+        * structure.
+        */
+       smp_mb();
+       spin_unlock_wait(&current->sighand->siglock);
+
        free_uid(old_user);
+       suid_keys(current);
 }
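(Aside, not part of the patch: the smp_mb()/spin_unlock_wait() pair exists because __sigqueue_alloc() can do get_uid(p->user) while the task's siglock is held. Roughly, the ordering being enforced:

	/*
	 * Illustrative timeline, not code from the patch:
	 *
	 *   switch_uid()  (this CPU)          signal path (other CPU)
	 *   ------------------------          -------------------------------
	 *   current->user = new_user          spin_lock(&sighand->siglock)
	 *   smp_mb()                          get_uid(t->user)   <- may still
	 *   spin_unlock_wait(&siglock)           have read old_user
	 *     (spins until that critical      ...
	 *      section has been left)         spin_unlock(&sighand->siglock)
	 *   free_uid(old_user)
	 */

spin_unlock_wait() never takes the lock; it only waits until nobody holds it. By then, any __sigqueue_alloc() that read the old pointer has already taken its reference, so the free_uid() here cannot free the structure out from under it.)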
 
 
@@ -161,9 +215,9 @@ static int __init uid_cache_init(void)
                INIT_LIST_HEAD(uidhash_table + n);
 
        /* Insert the root user immediately (init already runs as root) */
-       spin_lock(&uidhash_lock);
+       spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(0,0));
-       spin_unlock(&uidhash_lock);
+       spin_unlock_irq(&uidhash_lock);
 
        return 0;
 }